blob: 1c3ce776ad277111d51ee30e0e8c014a8cea1275 [file] [log] [blame]
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec.api_enum.h>

/**
 * ESP header as it appears on the wire (see RFC 4303).
 * Fields are stored in network byte order.
 */
typedef struct
{
  union
  {
    u32 spi;		 /* Security Parameters Index, word access */
    u8 spi_bytes[4];	 /* byte-wise access to the same SPI */
  };
  u32 seq;		 /* (low 32 bits of the) sequence number */
  u8 data[0];		 /* variable-length payload follows the header */
} esp_header_t;
33
/**
 * ESP trailer located at the end of the (decrypted) payload (RFC 4303).
 */
typedef struct
{
  u8 pad_length;	 /* number of padding bytes that precede this footer */
  u8 next_header;	 /* IP protocol number of the encapsulated payload */
} esp_footer_t;
39
/* IPv4 header immediately followed by ESP (no UDP encapsulation) */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
44
/* IPv4 + UDP + ESP: layout used for UDP-encapsulated ESP (NAT-T style) */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;
Klement Sekera4b089f22018-04-17 18:04:57 +020050
/* IPv6 header immediately followed by ESP */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
55
/**
 * AES counter mode nonce
 */
typedef struct
{
  u32 salt;	/* salt value; NOTE(review): presumably taken from the SA
		   key material — confirm against the users of this struct */
  u64 iv;	/* per-packet initialization vector */
  u32 ctr;	/* counter: 1 in big-endian for ctr, unused for gcm */
} __clib_packed esp_ctr_nonce_t;

/* the layout is consumed as a raw 16-byte block by the crypto engines */
STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);
67
/**
 * AES GCM Additional Authentication data
 */
typedef struct esp_aead_t_
{
  /**
   * for GCM: when using ESN it's:
   *   SPI, seq-hi, seq-low
   * else
   *   SPI, seq-low
   * (filled in by esp_aad_fill; only 8 of the 12 bytes are used
   * in the non-ESN case)
   */
  u32 data[3];
} __clib_packed esp_aead_t;
81
/* maximum value of a 32-bit ESP sequence number (2^32 - 1) */
#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000085
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000086/* TODO seq increment should be atomic to be accessed by multiple workers */
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000087always_inline int
88esp_seq_advance (ipsec_sa_t * sa)
89{
Damjan Marion1e3aa5e2019-03-28 10:58:59 +010090 if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000091 {
92 if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
93 {
Damjan Mariond709cbc2019-03-26 13:16:42 +010094 if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
95 sa->seq_hi == ESP_SEQ_MAX))
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000096 return 1;
97 sa->seq_hi++;
98 }
99 sa->seq++;
100 }
101 else
102 {
Damjan Mariond709cbc2019-03-26 13:16:42 +0100103 if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
104 sa->seq == ESP_SEQ_MAX))
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +0000105 return 1;
106 sa->seq++;
107 }
108
109 return 0;
110}
111
Fan Zhangf5395782020-04-29 14:00:03 +0100112always_inline u16
Neale Ranns5b891102021-06-28 13:31:28 +0000113esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
114 u32 seq_hi)
Neale Ranns47feb112019-04-11 15:14:07 +0000115{
116 esp_aead_t *aad;
117
Fan Zhangf5395782020-04-29 14:00:03 +0100118 aad = (esp_aead_t *) data;
Damjan Marion30b8b4a2019-05-29 18:49:25 +0200119 aad->data[0] = esp->spi;
Neale Ranns47feb112019-04-11 15:14:07 +0000120
121 if (ipsec_sa_is_set_USE_ESN (sa))
122 {
123 /* SPI, seq-hi, seq-low */
Neale Ranns5b891102021-06-28 13:31:28 +0000124 aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
Damjan Marion30b8b4a2019-05-29 18:49:25 +0200125 aad->data[2] = esp->seq;
Fan Zhangf5395782020-04-29 14:00:03 +0100126 return 12;
Neale Ranns47feb112019-04-11 15:14:07 +0000127 }
128 else
Damjan Marion30b8b4a2019-05-29 18:49:25 +0200129 {
130 /* SPI, seq-low */
131 aad->data[1] = esp->seq;
Fan Zhangf5395782020-04-29 14:00:03 +0100132 return 8;
Damjan Marion30b8b4a2019-05-29 18:49:25 +0200133 }
Neale Ranns47feb112019-04-11 15:14:07 +0000134}
Fan Zhangf5395782020-04-29 14:00:03 +0100135
Arthur de Kerhorad95b062022-11-16 19:12:05 +0100136always_inline u32
137esp_encrypt_err_to_sa_err (u32 err)
Fan Zhang18f0e312020-10-19 13:08:34 +0100138{
Arthur de Kerhorad95b062022-11-16 19:12:05 +0100139 switch (err)
140 {
141 case ESP_ENCRYPT_ERROR_HANDOFF:
142 return IPSEC_SA_ERROR_HANDOFF;
143 case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
144 return IPSEC_SA_ERROR_SEQ_CYCLED;
145 case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
146 return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
147 case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
148 return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
149 case ESP_ENCRYPT_ERROR_NO_BUFFERS:
150 return IPSEC_SA_ERROR_NO_BUFFERS;
151 case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
152 return IPSEC_SA_ERROR_NO_ENCRYPTION;
153 }
154 return ~0;
155}
156
157always_inline u32
158esp_decrypt_err_to_sa_err (u32 err)
159{
160 switch (err)
161 {
162 case ESP_DECRYPT_ERROR_HANDOFF:
163 return IPSEC_SA_ERROR_HANDOFF;
164 case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
165 return IPSEC_SA_ERROR_DECRYPTION_FAILED;
166 case ESP_DECRYPT_ERROR_INTEG_ERROR:
167 return IPSEC_SA_ERROR_INTEG_ERROR;
168 case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
169 return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
170 case ESP_DECRYPT_ERROR_REPLAY:
171 return IPSEC_SA_ERROR_REPLAY;
172 case ESP_DECRYPT_ERROR_RUNT:
173 return IPSEC_SA_ERROR_RUNT;
174 case ESP_DECRYPT_ERROR_NO_BUFFERS:
175 return IPSEC_SA_ERROR_NO_BUFFERS;
176 case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
177 return IPSEC_SA_ERROR_OVERSIZED_HEADER;
178 case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
179 return IPSEC_SA_ERROR_NO_TAIL_SPACE;
180 case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
181 return IPSEC_SA_ERROR_TUN_NO_PROTO;
182 case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
183 return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
184 }
185 return ~0;
186}
187
/* Drop helper for the encrypt path: translate the node error into its
 * per-SA counterpart and delegate to ipsec_set_next_index, which accounts
 * the error and steers buffer b to drop_next. */
always_inline void
esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_encrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}
197
/* Drop helper for the decrypt path: translate the node error into its
 * per-SA counterpart and delegate to ipsec_set_next_index, which accounts
 * the error and steers buffer b to drop_next. */
always_inline void
esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_decrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}
207
/**
 * The post data structure for esp_encrypt/decrypt_inline to write to the
 * vlib_buffer_t opaque unused field, and for post nodes to pick up after
 * dequeue.
 **/
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;		 /* integrity check value size, in bytes */
      u8 iv_sz;			 /* initialization vector size, in bytes */
      ipsec_sa_flags_t flags;	 /* copy of the SA's flags */
      u32 sa_index;		 /* index of the SA used for this packet */
    };
    u64 sa_data;		 /* the four fields above as a single word */
  };

  u32 seq;			 /* low 32 bits of the sequence number */
  i16 current_data;		 /* saved buffer current_data — presumably
				    restored by the post node; verify */
  i16 current_length;		 /* saved buffer current_length */
  u16 hdr_sz;
  u16 is_chain;			 /* non-zero when the packet spans a chain */
  u32 seq_hi;			 /* high 32 bits of the ESN */
} esp_decrypt_packet_data_t;

/* must fit the 3 u64 words of opaque space, with seq starting the 2nd
   word so sa_data can be loaded/stored as one u64 */
STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
STATIC_ASSERT_OFFSET_OF (esp_decrypt_packet_data_t, seq, sizeof (u64));
Fan Zhangf5395782020-04-29 14:00:03 +0100237
/* we are forced to store the decrypt post data into 2 separate places -
   vlib_opaque and opaque2. */
typedef struct
{
  vlib_buffer_t *lb;		 /* NOTE(review): presumably the last buffer
				    of the chain — confirm against users */
  u32 free_buffer_index;	 /* index of a buffer to free after dequeue */
  u8 icv_removed;		 /* set when the ICV has been stripped already */
} esp_decrypt_packet_data2_t;
246
/* per-buffer post-crypto metadata: either just the next node index
   (encrypt) or the full decrypt post data */
typedef union
{
  u16 next_index;
  esp_decrypt_packet_data_t decrypt_data;
} esp_post_data_t;

STATIC_ASSERT (sizeof (esp_post_data_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque_t");

/* access the post data stored in the unused tail of the buffer's opaque */
#define esp_post_data(b) \
    ((esp_post_data_t *)((u8 *)((b)->opaque) \
        + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))

STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque2_t");

/* access the second chunk of decrypt post data, kept in opaque2 */
#define esp_post_data2(b) \
    ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
        + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))
268
/* esp post node indices for async crypto, one per ESP datapath flavour */
typedef struct
{
  /* esp post node index for async crypto */
  u32 esp4_post_next;
  u32 esp6_post_next;
  u32 esp4_tun_post_next;
  u32 esp6_tun_post_next;
  u32 esp_mpls_tun_post_next;
} esp_async_post_next_t;
278
279extern esp_async_post_next_t esp_encrypt_async_next;
280extern esp_async_post_next_t esp_decrypt_async_next;
281
Xiaoming Jiang0c1454c2023-05-05 02:28:20 +0000282/* when submitting a frame is failed, drop all buffers in the frame */
283always_inline u32
284esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
285 vlib_node_runtime_t *node, u32 err,
286 u32 ipsec_sa_err, u16 index, u32 *from,
287 u16 *nexts, u16 drop_next_index,
288 bool is_encrypt)
289{
290 vlib_buffer_t *b;
291 u32 n_drop = f->n_elts;
292 u32 *bi = f->buffer_indices;
293
294 while (n_drop--)
295 {
296 u32 sa_index;
297
298 from[index] = bi[0];
299 b = vlib_get_buffer (vm, bi[0]);
300
301 if (is_encrypt)
302 {
303 sa_index = vnet_buffer (b)->ipsec.sad_index;
304 }
305 else
306 {
307 sa_index = esp_post_data (b)->decrypt_data.sa_index;
308 }
309
310 ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
311 index, nexts, drop_next_index, sa_index);
312 bi++;
313 index++;
314 }
315
316 return (f->n_elts);
317}
318
#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */