/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_node.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>

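/**
 * Pool from which all DVR DPOs are allocated; a DPO's dpoi_index is an
 * index into this pool.
 */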
dvr_dpo_t *dvr_dpo_pool;

/**
 * The 'DB' of DVR DPOs.
 * There is one DVR DPO per-interface per-L3 proto, so this is a
 * per-protocol vector indexed by sw_if_index.
 */
static index_t *dvr_dpo_db[DPO_PROTO_NUM];

static dvr_dpo_t *
dvr_dpo_alloc (void)
{
    dvr_dpo_t *dd;

    pool_get(dvr_dpo_pool, dd);

    return (dd);
}

static inline dvr_dpo_t *
dvr_dpo_get_from_dpo (const dpo_id_t *dpo)
{
    ASSERT(DPO_DVR == dpo->dpoi_type);

    return (dvr_dpo_get(dpo->dpoi_index));
}

static inline index_t
dvr_dpo_get_index (dvr_dpo_t *dd)
{
    return (dd - dvr_dpo_pool);
}

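/*
 * Standard DPO reference counting: each lock bumps dd_locks; when the
 * last lock is released the ipX-dvr-reinject feature is disabled on the
 * interface's output arc, the DB entry is invalidated and the DPO is
 * returned to the pool.
 */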
static void
dvr_dpo_lock (dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    dd = dvr_dpo_get_from_dpo(dpo);
    dd->dd_locks++;
}

static void
dvr_dpo_unlock (dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    dd = dvr_dpo_get_from_dpo(dpo);
    dd->dd_locks--;

    if (0 == dd->dd_locks)
    {
        if (DPO_PROTO_IP4 == dd->dd_proto)
        {
            vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
                                         dd->dd_sw_if_index, 0, 0, 0);
        }
        else
        {
            vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
                                         dd->dd_sw_if_index, 0, 0, 0);
        }

        dvr_dpo_db[dd->dd_proto][dd->dd_sw_if_index] = INDEX_INVALID;
        pool_put(dvr_dpo_pool, dd);
    }
}

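/**
 * Find or create the DVR DPO for (sw_if_index, dproto) and set 'dpo' to
 * point at it. Creating the DPO also enables the ipX-dvr-reinject feature
 * on the interface's ipX-output arc, so DVR packets are steered back into
 * the L2 output path.
 */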
void
dvr_dpo_add_or_lock (u32 sw_if_index,
                     dpo_proto_t dproto,
                     dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    vec_validate_init_empty(dvr_dpo_db[dproto],
                            sw_if_index,
                            INDEX_INVALID);

    if (INDEX_INVALID == dvr_dpo_db[dproto][sw_if_index])
    {
        dd = dvr_dpo_alloc();

        dd->dd_sw_if_index = sw_if_index;
        dd->dd_proto = dproto;

        dvr_dpo_db[dproto][sw_if_index] = dvr_dpo_get_index(dd);

        /*
         * enable the reinject into L2 path feature on the interface
         */
        if (DPO_PROTO_IP4 == dproto)
            vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
                                         dd->dd_sw_if_index, 1, 0, 0);
        else if (DPO_PROTO_IP6 == dproto)
            vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
                                         dd->dd_sw_if_index, 1, 0, 0);
        else
            ASSERT(0);
    }
    else
    {
        dd = dvr_dpo_get(dvr_dpo_db[dproto][sw_if_index]);
    }

    dpo_set(dpo, DPO_DVR, dproto, dvr_dpo_get_index(dd));
}

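/*
 * Interface state-change and add/del callbacks. DVR DPOs do not react to
 * these events, so the handlers below are currently no-ops.
 */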
static clib_error_t *
dvr_dpo_interface_state_change (vnet_main_t * vnm,
                                u32 sw_if_index,
                                u32 flags)
{
    /*
     * nothing to do
     */
    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
    dvr_dpo_interface_state_change);

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
dvr_dpo_hw_interface_state_change (vnet_main_t * vnm,
                                   u32 hw_if_index,
                                   u32 flags)
{
    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    dvr_dpo_hw_interface_state_change);

static clib_error_t *
dvr_dpo_interface_delete (vnet_main_t * vnm,
                          u32 sw_if_index,
                          u32 is_add)
{
    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
    dvr_dpo_interface_delete);

u8*
format_dvr_dpo (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    vnet_main_t * vnm = vnet_get_main();
    dvr_dpo_t *dd = dvr_dpo_get(index);

    return (format(s, "dvr-%U-dpo",
                   format_vnet_sw_interface_name,
                   vnm,
                   vnet_get_sw_interface(vnm, dd->dd_sw_if_index)));
}

static void
dvr_dpo_mem_show (void)
{
    fib_show_memory_usage("DVR",
                          pool_elts(dvr_dpo_pool),
                          pool_len(dvr_dpo_pool),
                          sizeof(dvr_dpo_t));
}

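/**
 * Virtual function table through which the generic DPO layer drives DVR
 * DPOs: reference counting, formatting and memory accounting.
 */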
const static dpo_vft_t dvr_dpo_vft = {
    .dv_lock = dvr_dpo_lock,
    .dv_unlock = dvr_dpo_unlock,
    .dv_format = format_dvr_dpo,
    .dv_mem_show = dvr_dpo_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a DVR
 * object.
 *
 * i.e. these are the graph nodes for which a DVR DPO is the parent
 * object in the DPO graph.
 */
const static char* const dvr_dpo_ip4_nodes[] =
{
    "ip4-dvr-dpo",
    NULL,
};
const static char* const dvr_dpo_ip6_nodes[] =
{
    "ip6-dvr-dpo",
    NULL,
};

const static char* const * const dvr_dpo_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = dvr_dpo_ip4_nodes,
    [DPO_PROTO_IP6] = dvr_dpo_ip6_nodes,
};

void
dvr_dpo_module_init (void)
{
    dpo_register(DPO_DVR,
                 &dvr_dpo_vft,
                 dvr_dpo_nodes);
}

/**
 * @brief DVR DPO trace data
 */
typedef struct dvr_dpo_trace_t_
{
    u32 sw_if_index;
} dvr_dpo_trace_t;

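/*
 * ipX-dvr-dpo node function: for each packet, set the TX interface from
 * the DPO, rewind the buffer back to the ethernet header (as recorded at
 * L2 input), record the L2 header length, mark the buffer with
 * VNET_BUFFER_F_IS_DVR and start the ipX-output feature arc. At the end
 * of that arc the dvr-reinject feature hands the packet to l2-output.
 */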
always_inline uword
dvr_dpo_inline (vlib_main_t * vm,
                vlib_node_runtime_t * node,
                vlib_frame_t * from_frame,
                u8 is_ip6)
{
    u32 n_left_from, next_index, * from, * to_next;
    ip_lookup_main_t *lm = (is_ip6?
                            &ip6_main.lookup_main:
                            &ip4_main.lookup_main);

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            const dvr_dpo_t *dd0, *dd1;
            u32 bi0, ddi0, bi1, ddi1;
            vlib_buffer_t *b0, *b1;
            u32 next0, next1;
            u8 len0, len1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;
            next0 = next1 = 0;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            ddi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            dd0 = dvr_dpo_get(ddi0);
            dd1 = dvr_dpo_get(ddi1);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;
            vnet_buffer(b1)->sw_if_index[VLIB_TX] = dd1->dd_sw_if_index;

            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            len1 = ((u8*)vlib_buffer_get_current(b1) -
                    (u8*)ethernet_buffer_get_header(b1));
            vnet_buffer(b0)->l2.l2_len =
                vnet_buffer(b0)->ip.save_rewrite_length =
                    len0;
            vnet_buffer(b1)->l2.l2_len =
                vnet_buffer(b1)->ip.save_rewrite_length =
                    len1;
            b0->flags |= VNET_BUFFER_F_IS_DVR;
            b1->flags |= VNET_BUFFER_F_IS_DVR;

            vlib_buffer_advance(b0, -len0);
            vlib_buffer_advance(b1, -len1);

            vnet_feature_arc_start (lm->output_feature_arc_index,
                                    dd0->dd_sw_if_index, &next0, b0);
            vnet_feature_arc_start (lm->output_feature_arc_index,
                                    dd1->dd_sw_if_index, &next1, b1);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = dd0->dd_sw_if_index;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = dd1->dd_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            next0, next1);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            const dvr_dpo_t * dd0;
            vlib_buffer_t * b0;
            u32 bi0, ddi0;
            u32 next0;
            u8 len0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;
            next0 = 0;

            b0 = vlib_get_buffer (vm, bi0);

            ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            dd0 = dvr_dpo_get(ddi0);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;

            /*
             * rewind the buffer back to the start of the L2 header
             */
            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            vnet_buffer(b0)->l2.l2_len =
                vnet_buffer(b0)->ip.save_rewrite_length =
                    len0;
            b0->flags |= VNET_BUFFER_F_IS_DVR;
            vlib_buffer_advance(b0, -len0);

            /*
             * start processing the ipX output features
             */
            vnet_feature_arc_start(lm->output_feature_arc_index,
                                   dd0->dd_sw_if_index, &next0, b0);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = dd0->dd_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0,
                                            next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

static u8 *
format_dvr_dpo_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    dvr_dpo_trace_t * t = va_arg (*args, dvr_dpo_trace_t *);
    u32 indent = format_get_indent (s);
    s = format (s, "%U sw_if_index:%d",
                format_white_space, indent,
                t->sw_if_index);
    return s;
}

static uword
ip4_dvr_dpo (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    return (dvr_dpo_inline(vm, node, from_frame, 0));
}

static uword
ip6_dvr_dpo (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    return (dvr_dpo_inline(vm, node, from_frame, 1));
}

VLIB_REGISTER_NODE (ip4_dvr_dpo_node) = {
    .function = ip4_dvr_dpo,
    .name = "ip4-dvr-dpo",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,
    .sibling_of = "ip4-rewrite",
};
VLIB_REGISTER_NODE (ip6_dvr_dpo_node) = {
    .function = ip6_dvr_dpo,
    .name = "ip6-dvr-dpo",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,
    .sibling_of = "ip6-rewrite",
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_dpo_node, ip4_dvr_dpo)
VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_dpo_node, ip6_dvr_dpo)

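/*
 * ipX-dvr-reinject node function: runs as a feature on the ipX-output
 * arc. Buffers marked VNET_BUFFER_F_IS_DVR are sent to l2-output to
 * complete L2 forwarding; all other buffers continue along the feature
 * arc.
 */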
typedef enum dvr_reinject_next_t_
{
    DVR_REINJECT_OUTPUT = 0,
} dvr_reinject_next_t;

always_inline uword
dvr_reinject_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            dvr_reinject_next_t next0, next1;
            vlib_buffer_t *b0, *b1;
            u32 bi0, bi1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            if (b0->flags & VNET_BUFFER_F_IS_DVR)
                next0 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next (&next0, b0);

            if (b1->flags & VNET_BUFFER_F_IS_DVR)
                next1 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next (&next1, b1);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = vnet_buffer(b1)->sw_if_index[VLIB_TX];
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            next0, next1);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            dvr_reinject_next_t next0;
            vlib_buffer_t * b0;
            u32 bi0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            if (b0->flags & VNET_BUFFER_F_IS_DVR)
                next0 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next (&next0, b0);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

static uword
ip4_dvr_reinject (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
    return (dvr_reinject_inline(vm, node, from_frame));
}

static uword
ip6_dvr_reinject (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
    return (dvr_reinject_inline(vm, node, from_frame));
}

VLIB_REGISTER_NODE (ip4_dvr_reinject_node) = {
    .function = ip4_dvr_reinject,
    .name = "ip4-dvr-reinject",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,

    .n_next_nodes = 1,
    .next_nodes = {
        [DVR_REINJECT_OUTPUT] = "l2-output",
    },
};

VLIB_REGISTER_NODE (ip6_dvr_reinject_node) = {
    .function = ip6_dvr_reinject,
    .name = "ip6-dvr-reinject",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,

    .n_next_nodes = 1,
    .next_nodes = {
        [DVR_REINJECT_OUTPUT] = "l2-output",
    },
};

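/*
 * Register the reinject nodes as features on the ipX-output arcs, ordered
 * after the NAT44 and ACL output features so those features run before
 * the packet is handed back to l2-output.
 */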
VNET_FEATURE_INIT (ip4_dvr_reinject_feat_node, static) =
{
    .arc_name = "ip4-output",
    .node_name = "ip4-dvr-reinject",
    .runs_after = VNET_FEATURES ("nat44-in2out-output",
                                 "acl-plugin-out-ip4-fa"),
};
VNET_FEATURE_INIT (ip6_dvr_reinject_feat_node, static) =
{
    .arc_name = "ip6-output",
    .node_name = "ip6-dvr-reinject",
    .runs_after = VNET_FEATURES ("acl-plugin-out-ip6-fa"),
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_reinject_node, ip4_dvr_reinject)
VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_reinject_node, ip6_dvr_reinject)