/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec.api_enum.h>

typedef struct
{
  union
  {
    u32 spi;
    u8 spi_bytes[4];
  };
  u32 seq;
  u8 data[0];
} esp_header_t;

typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

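/*
 * Usage sketch (illustrative, not part of this header's API): the trailing
 * esp_footer_t sits at the very end of the decrypted payload, just before
 * the ICV.  Assuming a single-buffer packet and a known icv_sz, a decrypt
 * path can locate it and recover the inner protocol like this:
 *
 *   esp_footer_t *f =
 *     (esp_footer_t *) (vlib_buffer_get_tail (b) - icv_sz -
 *                       sizeof (esp_footer_t));
 *   u16 payload_len = b->current_length - icv_sz - sizeof (esp_footer_t) -
 *                     f->pad_length;
 *   u8 next_proto = f->next_header;
 */
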
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

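/*
 * These packed typedefs let a node overlay all outer headers in one access.
 * A hypothetical sketch for UDP-encapsulated ESP (NAT-T), assuming the
 * buffer's current data pointer sits at the outer IPv4 header:
 *
 *   ip4_and_udp_and_esp_header_t *h = vlib_buffer_get_current (b);
 *   if (h->udp.dst_port == clib_host_to_net_u16 (4500)) // NAT-T port
 *     spi = clib_net_to_host_u32 (h->esp.spi);
 */
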
/**
 * AES counter mode nonce
 */
typedef struct
{
  u32 salt;
  u64 iv;
  u32 ctr; /* counter: set to 1 in big-endian for CTR, unused for GCM */
} __clib_packed esp_ctr_nonce_t;

STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);

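/*
 * Sketch of how an encrypt path might populate this nonce (the SA field
 * and counter names are illustrative, not guaranteed by this header):
 * the salt is the per-SA value derived with the key, the IV must be
 * unique per packet, and the block counter starts at 1 for CTR mode:
 *
 *   nonce->salt = sa->salt;                          // per-SA salt
 *   nonce->iv = clib_host_to_net_u64 (iv_counter++); // unique per packet
 *   nonce->ctr = clib_host_to_net_u32 (1);           // unused for GCM
 */
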
/**
 * AES GCM Additional Authentication data
 */
typedef struct esp_aead_t_
{
  /**
   * for GCM, when using ESN:
   *   SPI, seq-hi, seq-low
   * otherwise:
   *   SPI, seq-low
   */
  u32 data[3];
} __clib_packed esp_aead_t;

#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);

/* TODO: the sequence number increment should be atomic so an SA can be
   safely shared by multiple workers */
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
	{
	  if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
			     sa->seq_hi == ESP_SEQ_MAX))
	    return 1;
	  sa->seq_hi++;
	}
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
			 sa->seq == ESP_SEQ_MAX))
	return 1;
      sa->seq++;
    }

  return 0;
}

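/*
 * Sketch of the intended call pattern in an encrypt node (variable names
 * are illustrative): advance the sequence number before building the ESP
 * header, and drop the packet when the space is exhausted (a return value
 * of 1 means the SA must be re-keyed):
 *
 *   if (PREDICT_FALSE (esp_seq_advance (sa)))
 *     {
 *       b->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
 *       next[0] = drop_next;
 *     }
 *   else
 *     esp->seq = clib_host_to_net_u32 (sa->seq);
 */
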
always_inline u16
esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
	      u32 seq_hi)
{
  esp_aead_t *aad;

  aad = (esp_aead_t *) data;
  aad->data[0] = esp->spi;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      /* SPI, seq-hi, seq-low */
      aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
      aad->data[2] = esp->seq;
      return 12;
    }
  else
    {
      /* SPI, seq-low */
      aad->data[1] = esp->seq;
      return 8;
    }
}

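/*
 * Example (a sketch, not a node's exact code; the crypto-op field names
 * are assumptions): fill a small AAD scratch area ahead of a GCM operation
 * and hand the returned length to the crypto op:
 *
 *   u8 aad[12];
 *   u16 aad_len = esp_aad_fill (aad, esp, sa, sa->seq_hi);
 *   op->aad = aad;
 *   op->aad_len = aad_len;   // 12 with ESN, 8 without
 */
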
always_inline u32
esp_encrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_ENCRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
      return IPSEC_SA_ERROR_SEQ_CYCLED;
    case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
      return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
    case ESP_ENCRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
      return IPSEC_SA_ERROR_NO_ENCRYPTION;
    }
  return ~0;
}

always_inline u32
esp_decrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_DECRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
      return IPSEC_SA_ERROR_DECRYPTION_FAILED;
    case ESP_DECRYPT_ERROR_INTEG_ERROR:
      return IPSEC_SA_ERROR_INTEG_ERROR;
    case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_DECRYPT_ERROR_REPLAY:
      return IPSEC_SA_ERROR_REPLAY;
    case ESP_DECRYPT_ERROR_RUNT:
      return IPSEC_SA_ERROR_RUNT;
    case ESP_DECRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
      return IPSEC_SA_ERROR_OVERSIZED_HEADER;
    case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
      return IPSEC_SA_ERROR_NO_TAIL_SPACE;
    case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
      return IPSEC_SA_ERROR_TUN_NO_PROTO;
    case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
      return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
    }
  return ~0;
}

always_inline void
esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_encrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}

always_inline void
esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_decrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}

/* when submitting a frame fails, drop all buffers in the frame */
always_inline u32
esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
				 vlib_node_runtime_t *node, u32 err,
				 u32 ipsec_sa_err, u16 index, u32 *from,
				 u16 *nexts, u16 drop_next_index)
{
  vlib_buffer_t *b;
  u32 n_drop = f->n_elts;
  u32 *bi = f->buffer_indices;

  while (n_drop--)
    {
      from[index] = bi[0];
      b = vlib_get_buffer (vm, bi[0]);
      ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
			    index, nexts, drop_next_index,
			    vnet_buffer (b)->ipsec.sad_index);
      bi++;
      index++;
    }

  return (f->n_elts);
}

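/*
 * Sketch of the recovery path in an async node (the n_noop/noop_* names
 * are illustrative): when submitting an open frame to the crypto engine
 * fails, every buffer in the frame is steered to the drop next and the
 * error is counted against its SA:
 *
 *   if (PREDICT_FALSE (vnet_crypto_async_submit_open_frame (vm, f) < 0))
 *     n_noop += esp_async_recycle_failed_submit (
 *       vm, f, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
 *       IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
 *       drop_next);
 */
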
/**
 * The post data structure for esp_encrypt/decrypt_inline to write to the
 * vlib_buffer_t opaque unused field, and for post nodes to pick up after
 * dequeue.
 **/
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
  u16 is_chain;
  u32 seq_hi;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
STATIC_ASSERT_OFFSET_OF (esp_decrypt_packet_data_t, seq, sizeof (u64));

269/* we are forced to store the decrypt post data into 2 separate places -
270 vlib_opaque and opaque2. */
typedef struct
{
  vlib_buffer_t *lb;
  u32 free_buffer_index;
  u8 icv_removed;
} esp_decrypt_packet_data2_t;

typedef union
{
  u16 next_index;
  esp_decrypt_packet_data_t decrypt_data;
} esp_post_data_t;

STATIC_ASSERT (sizeof (esp_post_data_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque_t");

#define esp_post_data(b) \
  ((esp_post_data_t *)((u8 *)((b)->opaque) \
      + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))

STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque2_t");

#define esp_post_data2(b) \
  ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
      + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))

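/*
 * Usage sketch (illustrative): an async decrypt node stashes its per-packet
 * state in the buffer's unused opaque space before handing the buffer to
 * the crypto engine, and the post node recovers it after dequeue:
 *
 *   // enqueue side
 *   esp_decrypt_packet_data_t *pd = &esp_post_data (b)->decrypt_data;
 *   pd->sa_index = sa_index;
 *
 *   // post-node side, after the crypto result is dequeued
 *   pd = &esp_post_data (b)->decrypt_data;
 *   sa = ipsec_sa_get (pd->sa_index);
 */
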
typedef struct
{
  /* esp post node index for async crypto */
  u32 esp4_post_next;
  u32 esp6_post_next;
  u32 esp4_tun_post_next;
  u32 esp6_tun_post_next;
  u32 esp_mpls_tun_post_next;
} esp_async_post_next_t;

extern esp_async_post_next_t esp_encrypt_async_next;
extern esp_async_post_next_t esp_decrypt_async_next;

#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */