blob: 87eb801f76f98259a9491fe51c78484064d046c4 [file] [log] [blame]
Neale Rannsf62a8c02019-04-02 08:13:33 +00001/*
2 * esp_encrypt.c : IPSec ESP encrypt node
3 *
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vnet/ipsec/ipsec.h>
19#include <vnet/ipsec/ipsec_sa.h>
20
/* Error counters for all IPsec handoff nodes.  The X-macro list drives
 * both the enum below and ipsec_handoff_error_strings. */
#define foreach_ipsec_handoff_error  \
_(CONGESTION_DROP, "congestion drop")

typedef enum
{
#define _(sym,str) IPSEC_HANDOFF_ERROR_##sym,
  foreach_ipsec_handoff_error
#undef _
  /* Number of error counters.  The NAT44 name below was a copy/paste
   * slip from the NAT44 handoff node; it is kept as a value-equal,
   * deprecated alias so any existing references still compile. */
  IPSEC_HANDOFF_N_ERROR,
  NAT44_HANDOFF_N_ERROR = IPSEC_HANDOFF_N_ERROR,
} ipsec_handoff_error_t;
31
/* Human-readable strings for ipsec_handoff_error_t, generated from the
 * same X-macro list so the two always stay in sync. */
static char *ipsec_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_handoff_error
#undef _
};
37
/* Per-buffer trace record for the handoff nodes. */
typedef struct ipsec_handoff_trace_t_
{
  u32 next_worker_index;	/* worker thread the buffer was handed to */
} ipsec_handoff_trace_t;
42
43static u8 *
44format_ipsec_handoff_trace (u8 * s, va_list * args)
45{
46 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
47 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
48 ipsec_handoff_trace_t *t = va_arg (*args, ipsec_handoff_trace_t *);
49
50 s = format (s, "next-worker %d", t->next_worker_index);
51
52 return s;
53}
54
/* Hand each buffer in the frame off to the worker thread that owns its
 * SA.  (The old comment here mentioned a "NAT HA protocol header" -- a
 * copy/paste leftover; the thread index actually comes from the SA.)
 *
 * For every buffer, the SA is looked up via the buffer's
 * ipsec.sad_index and the destination worker is that SA's
 * encrypt_thread_index or decrypt_thread_index, chosen by is_enc.
 * Buffers are then enqueued onto the frame queue fq_index with
 * drop-on-congestion semantics.
 *
 * @param vm        per-thread vlib main
 * @param node      this node's runtime (used for tracing and counters)
 * @param frame     frame of buffer indices to dispatch
 * @param fq_index  handoff frame queue to enqueue onto
 * @param is_enc    true for encrypt-side nodes, false for decrypt-side
 * @return          number of buffers successfully enqueued
 */
static_always_inline uword
ipsec_handoff (vlib_main_t * vm,
	       vlib_node_runtime_t * node,
	       vlib_frame_t * frame, u32 fq_index, bool is_enc)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
  u32 n_enq, n_left_from, *from;
  ipsec_main_t *im;

  im = &ipsec_main;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  ti = thread_indices;

  /* Quad loop: resolve the destination worker for four buffers per
   * iteration; thread indices accumulate in thread_indices[]. */
  while (n_left_from >= 4)
    {
      ipsec_sa_t *sa0, *sa1, *sa2, *sa3;
      u32 sai0, sai1, sai2, sai3;

      /* Prefetch next iteration. */
      if (n_left_from >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);

	  vlib_prefetch_buffer_data (b[4], LOAD);
	  vlib_prefetch_buffer_data (b[5], LOAD);
	  vlib_prefetch_buffer_data (b[6], LOAD);
	  vlib_prefetch_buffer_data (b[7], LOAD);
	}

      /* Each buffer carries the pool index of its SA. */
      sai0 = vnet_buffer (b[0])->ipsec.sad_index;
      sai1 = vnet_buffer (b[1])->ipsec.sad_index;
      sai2 = vnet_buffer (b[2])->ipsec.sad_index;
      sai3 = vnet_buffer (b[3])->ipsec.sad_index;
      sa0 = pool_elt_at_index (im->sad, sai0);
      sa1 = pool_elt_at_index (im->sad, sai1);
      sa2 = pool_elt_at_index (im->sad, sai2);
      sa3 = pool_elt_at_index (im->sad, sai3);

      if (is_enc)
	{
	  ti[0] = sa0->encrypt_thread_index;
	  ti[1] = sa1->encrypt_thread_index;
	  ti[2] = sa2->encrypt_thread_index;
	  ti[3] = sa3->encrypt_thread_index;
	}
      else
	{
	  ti[0] = sa0->decrypt_thread_index;
	  ti[1] = sa1->decrypt_thread_index;
	  ti[2] = sa2->decrypt_thread_index;
	  ti[3] = sa3->decrypt_thread_index;
	}

      if (node->flags & VLIB_NODE_FLAG_TRACE)
	{
	  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[0], sizeof (*t));
	      t->next_worker_index = ti[0];
	    }
	  if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[1], sizeof (*t));
	      t->next_worker_index = ti[1];
	    }
	  if (PREDICT_FALSE (b[2]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[2], sizeof (*t));
	      t->next_worker_index = ti[2];
	    }
	  if (PREDICT_FALSE (b[3]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[3], sizeof (*t));
	      t->next_worker_index = ti[3];
	    }
	}

      n_left_from -= 4;
      ti += 4;
      b += 4;
    }
  /* Single loop for the remaining (frame->n_vectors % 4) buffers. */
  while (n_left_from > 0)
    {
      ipsec_sa_t *sa0;
      u32 sai0;

      sai0 = vnet_buffer (b[0])->ipsec.sad_index;
      sa0 = pool_elt_at_index (im->sad, sai0);

      if (is_enc)
	ti[0] = sa0->encrypt_thread_index;
      else
	ti[0] = sa0->decrypt_thread_index;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ipsec_handoff_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->next_worker_index = ti[0];
	}

      n_left_from -= 1;
      ti += 1;
      b += 1;
    }

  /* Final argument 1 => drop (rather than block) when a destination
   * thread's queue is full; such drops are counted below. */
  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from,
					 thread_indices, frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    vlib_node_increment_counter (vm, node->node_index,
				 IPSEC_HANDOFF_ERROR_CONGESTION_DROP,
				 frame->n_vectors - n_enq);

  return n_enq;
}
184
185VLIB_NODE_FN (esp4_encrypt_handoff) (vlib_main_t * vm,
186 vlib_node_runtime_t * node,
187 vlib_frame_t * from_frame)
188{
189 ipsec_main_t *im = &ipsec_main;
190
191 return ipsec_handoff (vm, node, from_frame, im->esp4_enc_fq_index, true);
192}
193
194VLIB_NODE_FN (esp6_encrypt_handoff) (vlib_main_t * vm,
195 vlib_node_runtime_t * node,
196 vlib_frame_t * from_frame)
197{
198 ipsec_main_t *im = &ipsec_main;
199
200 return ipsec_handoff (vm, node, from_frame, im->esp6_enc_fq_index, true);
201}
202
203VLIB_NODE_FN (esp4_encrypt_tun_handoff) (vlib_main_t * vm,
204 vlib_node_runtime_t * node,
205 vlib_frame_t * from_frame)
206{
207 ipsec_main_t *im = &ipsec_main;
208
209 return ipsec_handoff (vm, node, from_frame, im->esp4_enc_tun_fq_index,
210 true);
211}
212
213VLIB_NODE_FN (esp6_encrypt_tun_handoff) (vlib_main_t * vm,
214 vlib_node_runtime_t * node,
215 vlib_frame_t * from_frame)
216{
217 ipsec_main_t *im = &ipsec_main;
218
219 return ipsec_handoff (vm, node, from_frame, im->esp6_enc_tun_fq_index,
220 true);
221}
222
Neale Ranns4a58e492020-12-21 13:19:10 +0000223VLIB_NODE_FN (esp_mpls_encrypt_tun_handoff)
224(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
225{
226 ipsec_main_t *im = &ipsec_main;
227
228 return ipsec_handoff (vm, node, from_frame, im->esp_mpls_enc_tun_fq_index,
229 true);
230}
231
Neale Rannsf62a8c02019-04-02 08:13:33 +0000232VLIB_NODE_FN (esp4_decrypt_handoff) (vlib_main_t * vm,
233 vlib_node_runtime_t * node,
234 vlib_frame_t * from_frame)
235{
236 ipsec_main_t *im = &ipsec_main;
237
238 return ipsec_handoff (vm, node, from_frame, im->esp4_dec_fq_index, false);
239}
240
241VLIB_NODE_FN (esp6_decrypt_handoff) (vlib_main_t * vm,
242 vlib_node_runtime_t * node,
243 vlib_frame_t * from_frame)
244{
245 ipsec_main_t *im = &ipsec_main;
246
247 return ipsec_handoff (vm, node, from_frame, im->esp6_dec_fq_index, false);
248}
249
250VLIB_NODE_FN (esp4_decrypt_tun_handoff) (vlib_main_t * vm,
251 vlib_node_runtime_t * node,
252 vlib_frame_t * from_frame)
253{
254 ipsec_main_t *im = &ipsec_main;
255
256 return ipsec_handoff (vm, node, from_frame, im->esp4_dec_tun_fq_index,
257 false);
258}
259
260VLIB_NODE_FN (esp6_decrypt_tun_handoff) (vlib_main_t * vm,
261 vlib_node_runtime_t * node,
262 vlib_frame_t * from_frame)
263{
264 ipsec_main_t *im = &ipsec_main;
265
266 return ipsec_handoff (vm, node, from_frame, im->esp6_dec_tun_fq_index,
267 false);
268}
269
270VLIB_NODE_FN (ah4_encrypt_handoff) (vlib_main_t * vm,
271 vlib_node_runtime_t * node,
272 vlib_frame_t * from_frame)
273{
274 ipsec_main_t *im = &ipsec_main;
275
276 return ipsec_handoff (vm, node, from_frame, im->ah4_enc_fq_index, true);
277}
278
279VLIB_NODE_FN (ah6_encrypt_handoff) (vlib_main_t * vm,
280 vlib_node_runtime_t * node,
281 vlib_frame_t * from_frame)
282{
283 ipsec_main_t *im = &ipsec_main;
284
285 return ipsec_handoff (vm, node, from_frame, im->ah6_enc_fq_index, true);
286}
287
288VLIB_NODE_FN (ah4_decrypt_handoff) (vlib_main_t * vm,
289 vlib_node_runtime_t * node,
290 vlib_frame_t * from_frame)
291{
292 ipsec_main_t *im = &ipsec_main;
293
294 return ipsec_handoff (vm, node, from_frame, im->ah4_dec_fq_index, false);
295}
296
297VLIB_NODE_FN (ah6_decrypt_handoff) (vlib_main_t * vm,
298 vlib_node_runtime_t * node,
299 vlib_frame_t * from_frame)
300{
301 ipsec_main_t *im = &ipsec_main;
302
303 return ipsec_handoff (vm, node, from_frame, im->ah6_dec_fq_index, false);
304}
305
/* *INDENT-OFF* */
/* Graph-node registrations: one handoff node per ESP/AH x IPv4/IPv6/MPLS
 * x encrypt/decrypt (x tunnel) combination.  All share the same trace
 * formatter and error strings, and each has a single next node,
 * "error-drop", used for buffers dropped on congestion. */
VLIB_REGISTER_NODE (esp4_encrypt_handoff) = {
  .name = "esp4-encrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp6_encrypt_handoff) = {
  .name = "esp6-encrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp4_encrypt_tun_handoff) = {
  .name = "esp4-encrypt-tun-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp6_encrypt_tun_handoff) = {
  .name = "esp6-encrypt-tun-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_handoff) = {
  .name = "esp-mpls-encrypt-tun-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_handoff) = {
  .name = "esp4-decrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_handoff) = {
  .name = "esp6-decrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_handoff) = {
  .name = "esp4-decrypt-tun-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_handoff) = {
  .name = "esp6-decrypt-tun-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (ah4_encrypt_handoff) = {
  .name = "ah4-encrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (ah6_encrypt_handoff) = {
  .name = "ah6-encrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (ah4_decrypt_handoff) = {
  .name = "ah4-decrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
VLIB_REGISTER_NODE (ah6_decrypt_handoff) = {
  .name = "ah6-decrypt-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_handoff_error_strings),
  .error_strings = ipsec_handoff_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
464
465/*
466 * fd.io coding-style-patch-verification: ON
467 *
468 * Local Variables:
469 * eval: (c-set-style "gnu")
470 * End:
471 */