/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;


#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(RUNT, "undersized packet")                                   \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
 _(TUN_NO_PROTO, "no tunnel protocol")


typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s =
    format (s,
            "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
            format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
            t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}

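/* Per-packet state saved in the first pass and used again in the second
 * (post-decrypt) pass; the anonymous union lets the cached per-SA fields
 * be copied as a single u64. */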
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)

always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n, n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  clib_memset_u16 (nexts, -1, n_left);

  while (n_left > 0)
    {
      u8 *payload;

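      /* prefetch the buffer header two packets ahead and the cache lines
       * around the next packet's current data */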
      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }

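      /* chained buffers are not supported - try to linearize and drop the
       * packet if it still spans more than one buffer */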
      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

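      /* packet is on a different SA than the previous one - flush the
       * per-SA counters and cache the fields needed per packet */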
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->current_length = b[0]->current_length;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);

      /* we need 4 extra bytes for HMAC calculation when ESN are used */
      if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz &&
          (pd->current_data + pd->current_length + 4 > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

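      /* drop packets too short to hold an ESP header, IV and ICV */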
      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += pd->current_length;

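      /* queue an integrity check covering the ESP header, IV and
       * ciphertext; the received ICV sits at the end of the payload */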
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);

          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key_index = sa0->integ_key_index;
          op->src = payload;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->user_data = b - bufs;
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
              clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
              clib_memcpy_fast (payload + len, &seq_hi, sz);
              clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
              op->len += sz;
              op->digest += sz;
            }
        }

      payload += esp_sz;
      len -= esp_sz;

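      /* queue the decrypt: the IV immediately follows the ESP header and
       * the ciphertext is decrypted in place */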
      if (sa0->crypto_enc_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key_index = sa0->crypto_key_index;
          op->iv = payload;

          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              esp_header_t *esp0;
              esp_aead_t *aad;
              u8 *scratch;

              /*
               * construct the AAD and the nonce (Salt || IV) in a scratch
               * space in front of the IP header.
               */
              scratch = payload - esp_sz;
              esp0 = (esp_header_t *) (scratch);

              scratch -= (sizeof (*aad) + pd->hdr_sz);
              op->aad = scratch;

              esp_aad_fill (op, esp0, sa0);

              /*
               * we don't need to refer to the ESP header anymore so we
               * can overwrite it with the salt and use the IV where it is
               * to form the nonce = (Salt + IV)
               */
              op->iv -= sizeof (sa0->salt);
              clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

              op->tag = payload + len;
              op->tag_len = 16;
            }
          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz;
          op->user_data = b - bufs;
        }

      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);

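  /* run all the queued integrity checks for this frame; any op that did
   * not complete marks its packet for drop */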
  if ((n = vec_len (ptd->integ_ops)))
    {
      vnet_crypto_op_t *op = ptd->integ_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;
              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
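  /* likewise run the queued decrypt operations and drop any failures */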
  if ((n = vec_len (ptd->crypto_ops)))
    {
      vnet_crypto_op_t *op = ptd->crypto_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi;

              bi = op->user_data;

              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;

              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }

  /* Post-decryption round - adjust packet data start and length and pick
     the next node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
        IPSEC_SA_FLAG_IS_TUNNEL_V6;

      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      if (next[0] < ESP_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);

      /*
       * redo the anti-replay check.
       * Say this frame contains the sequence numbers s, s+1, s+1, s+1,
       * and s and s+1 are in the window. When we did the anti-replay
       * check above we did so against the state of the window (W) after
       * packet s-1, so each of the packets in the sequence was accepted.
       * This time s is checked against W(s-1), s+1 is checked against
       * W(s) (i.e. the window state is updated/advanced as we go), so
       * this time the repeated s+1 packets are dropped.
       * This is a consequence of batching the decrypts. If the
       * check-decrypt-advance process were done per packet it would be
       * fine. But we batch the decrypts because it's much more efficient
       * to do so in SW, and necessary if we offload to HW and the process
       * is async.
       *
       * You're probably thinking this means an attacker can send the
       * above sequence and cause VPP to perform decrypts that will fail,
       * and that's true. But if the attacker can determine s (a valid
       * sequence number in the window), which is non-trivial, it can
       * generate a sequence s, s+1, s+2, ... s+n and nothing will prevent
       * any implementation, sequential or batching, from decrypting these.
       */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto trace;
        }

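      /* the sequence number is acceptable - advance the replay window */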
      ipsec_sa_anti_replay_advance (sa0, pd->seq);

      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
                                          pd->current_length - sizeof (*f) -
                                          pd->icv_sz);
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;

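      /* transport mode: slide the original IP header forward over the ESP
       * header and IV (dropping any UDP encap header) and fix up its
       * length, protocol and checksum fields */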
      if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
        {
          u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
            sizeof (udp_header_t) : 0;
          u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
          u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
          u8 *ip = old_ip + adv + udp_sz;

          if (is_ip6 && ip_hdr_sz > 64)
            memmove (ip, old_ip, ip_hdr_sz);
          else
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
          b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;

          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
              len -= adv + tail;
              ip6->payload_length = clib_host_to_net_u16 (len);
              ip6->protocol = f->next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
              len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
              sum = ip_csum_update (sum, ip4->protocol, f->next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
              ip4->protocol = f->next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
            }
        }
      else
        {
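          /* tunnel mode: advance past the ESP header and IV so the inner
           * packet starts at the buffer head; the next node comes from the
           * footer's next_header */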
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else
            {
              next[0] = ESP_DECRYPT_NEXT_DROP;
              b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
              goto trace;
            }
          if (is_tun)
            {
              if (ipsec_sa_is_set_IS_PROTECT (sa0))
                {
                  /*
                   * Check that the revealed IP header matches that
                   * of the tunnel we are protecting
                   */
                  const ipsec_tun_protect_t *itp;

                  itp =
                    ipsec_tun_protect_get (vnet_buffer (b[0])->
                                           ipsec.protect_index);
                  if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
                    {
                      const ip4_header_t *ip4;

                      ip4 = vlib_buffer_get_current (b[0]);

                      if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                                     &ip4->dst_address) ||
                          !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                     &ip4->src_address))
                        {
                          next[0] = ESP_DECRYPT_NEXT_DROP;
                          b[0]->error =
                            node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                        }
                    }
                  else if (f->next_header == IP_PROTOCOL_IPV6)
                    {
                      const ip6_header_t *ip6;

                      ip6 = vlib_buffer_get_current (b[0]);

                      if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                                     &ip6->dst_address) ||
                          !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                     &ip6->src_address))
                        {
                          next[0] = ESP_DECRYPT_NEXT_DROP;
                          b[0]->error =
                            node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                        }
                    }
                }
            }
        }

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->last_seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  b = bufs;
  return n_left;
}

VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .sibling_of = "esp4-decrypt",
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .sibling_of = "esp6-decrypt",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */