/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_node.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>

/**
 * The 'DB' of DVR DPOs.
 * There is one DVR DPO per-interface per-L3 protocol, so this is an array,
 * indexed by DPO protocol, of per-interface vectors.
 */
static index_t *dvr_dpo_db[DPO_PROTO_NUM];
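
/*
 * Illustrative sketch only (not used by the code below): the DB maps
 * (DPO protocol, sw_if_index) to the pool index of that interface's DVR
 * DPO, or INDEX_INVALID if none has been created yet. Assuming the inner
 * vector has already been validated to cover 'sw_if_index':
 *
 *   index_t ddi = dvr_dpo_db[DPO_PROTO_IP4][sw_if_index];
 *   dvr_dpo_t *dd = (INDEX_INVALID == ddi) ? NULL : dvr_dpo_get(ddi);
 */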

static dvr_dpo_t *
dvr_dpo_alloc (void)
{
    dvr_dpo_t *dd;

    pool_get(dvr_dpo_pool, dd);

    return (dd);
}

static inline dvr_dpo_t *
dvr_dpo_get_from_dpo (const dpo_id_t *dpo)
{
    ASSERT(DPO_DVR == dpo->dpoi_type);

    return (dvr_dpo_get(dpo->dpoi_index));
}

static inline index_t
dvr_dpo_get_index (dvr_dpo_t *dd)
{
    return (dd - dvr_dpo_pool);
}

static void
dvr_dpo_lock (dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    dd = dvr_dpo_get_from_dpo(dpo);
    dd->dd_locks++;
}

static void
dvr_dpo_unlock (dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    dd = dvr_dpo_get_from_dpo(dpo);
    dd->dd_locks--;

    if (0 == dd->dd_locks)
    {
        if (DPO_PROTO_IP4 == dd->dd_proto)
        {
            vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
                                         dd->dd_sw_if_index, 0, 0, 0);
        }
        else
        {
            vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
                                         dd->dd_sw_if_index, 0, 0, 0);
        }

        dvr_dpo_db[dd->dd_proto][dd->dd_sw_if_index] = INDEX_INVALID;
        pool_put(dvr_dpo_pool, dd);
    }
}

void
dvr_dpo_add_or_lock (u32 sw_if_index,
                     dpo_proto_t dproto,
                     dpo_id_t *dpo)
{
    dvr_dpo_t *dd;

    vec_validate_init_empty(dvr_dpo_db[dproto],
                            sw_if_index,
                            INDEX_INVALID);

    if (INDEX_INVALID == dvr_dpo_db[dproto][sw_if_index])
    {
        dd = dvr_dpo_alloc();

        dd->dd_sw_if_index = sw_if_index;
        dd->dd_proto = dproto;

        dvr_dpo_db[dproto][sw_if_index] = dvr_dpo_get_index(dd);

        /*
         * enable the reinject into L2 path feature on the interface
         */
        if (DPO_PROTO_IP4 == dproto)
            vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
                                         dd->dd_sw_if_index, 1, 0, 0);
        else if (DPO_PROTO_IP6 == dproto)
            vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
                                         dd->dd_sw_if_index, 1, 0, 0);
        else
            ASSERT(0);
    }
    else
    {
        dd = dvr_dpo_get(dvr_dpo_db[dproto][sw_if_index]);
    }

    dpo_set(dpo, DPO_DVR, dproto, dvr_dpo_get_index(dd));
}
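
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * control-plane component that wants IPv4 traffic on 'sw_if_index' to be
 * reinjected into the L2 path would resolve via a DVR DPO roughly as:
 *
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   dvr_dpo_add_or_lock(sw_if_index, DPO_PROTO_IP4, &dpo);
 *   ... use 'dpo' as the parent of whatever object needs it ...
 *   dpo_reset(&dpo);   // releases the reference when done
 */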

static clib_error_t *
dvr_dpo_interface_state_change (vnet_main_t * vnm,
                                u32 sw_if_index,
                                u32 flags)
{
    /*
     * no-op: a DVR DPO does not need to react to admin state changes
     */
    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
    dvr_dpo_interface_state_change);

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
dvr_dpo_hw_interface_state_change (vnet_main_t * vnm,
                                   u32 hw_if_index,
                                   u32 flags)
{
    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    dvr_dpo_hw_interface_state_change);

static clib_error_t *
dvr_dpo_interface_delete (vnet_main_t * vnm,
                          u32 sw_if_index,
                          u32 is_add)
{
    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
    dvr_dpo_interface_delete);

u8*
format_dvr_dpo (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    vnet_main_t * vnm = vnet_get_main();
    dvr_dpo_t *dd = dvr_dpo_get(index);

    return (format(s, "dvr-%U-dpo",
                   format_vnet_sw_interface_name,
                   vnm,
                   vnet_get_sw_interface(vnm, dd->dd_sw_if_index)));
}

static void
dvr_dpo_mem_show (void)
{
    fib_show_memory_usage("DVR",
                          pool_elts(dvr_dpo_pool),
                          pool_len(dvr_dpo_pool),
                          sizeof(dvr_dpo_t));
}

const static dpo_vft_t dvr_dpo_vft = {
    .dv_lock = dvr_dpo_lock,
    .dv_unlock = dvr_dpo_unlock,
    .dv_format = format_dvr_dpo,
    .dv_mem_show = dvr_dpo_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a DVR
 *        object.
 *
 * These are the nodes to which a packet is sent when its parent object
 * in the DPO-graph is a DVR DPO.
 */
const static char* const dvr_dpo_ip4_nodes[] =
{
    "ip4-dvr-dpo",
    NULL,
};
const static char* const dvr_dpo_ip6_nodes[] =
{
    "ip6-dvr-dpo",
    NULL,
};

const static char* const * const dvr_dpo_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = dvr_dpo_ip4_nodes,
    [DPO_PROTO_IP6] = dvr_dpo_ip6_nodes,
};

void
dvr_dpo_module_init (void)
{
    dpo_register(DPO_DVR,
                 &dvr_dpo_vft,
                 dvr_dpo_nodes);
}
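
/*
 * Illustrative only: the registration above is what makes a DVR DPO usable
 * as a parent in the DPO graph. A child object is wired to the
 * "ip4-dvr-dpo"/"ip6-dvr-dpo" nodes by stacking on the DVR DPO, e.g.
 * (caller-owned names, shown as an assumption):
 *
 *   dpo_stack(child_type, DPO_PROTO_IP4, &child_dpo, &dvr_dpo);
 */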

/**
 * @brief DVR DPO trace data
 */
typedef struct dvr_dpo_trace_t_
{
    u32 sw_if_index;
} dvr_dpo_trace_t;

always_inline uword
dvr_dpo_inline (vlib_main_t * vm,
                vlib_node_runtime_t * node,
                vlib_frame_t * from_frame,
                u8 is_ip6)
{
    u32 n_left_from, next_index, * from, * to_next;
    ip_lookup_main_t *lm = (is_ip6?
                            &ip6_main.lookup_main:
                            &ip4_main.lookup_main);

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            const dvr_dpo_t *dd0, *dd1;
            u32 bi0, ddi0, bi1, ddi1;
            vlib_buffer_t *b0, *b1;
            u32 next0, next1;
            u8 len0, len1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;
            next0 = next1 = 0;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            ddi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            dd0 = dvr_dpo_get(ddi0);
            dd1 = dvr_dpo_get(ddi1);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;
            vnet_buffer(b1)->sw_if_index[VLIB_TX] = dd1->dd_sw_if_index;

            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            len1 = ((u8*)vlib_buffer_get_current(b1) -
                    (u8*)ethernet_buffer_get_header(b1));
            vnet_buffer(b0)->l2.l2_len =
                vnet_buffer(b0)->ip.save_rewrite_length =
                    len0;
            vnet_buffer(b1)->l2.l2_len =
                vnet_buffer(b1)->ip.save_rewrite_length =
                    len1;
            b0->flags |= VNET_BUFFER_F_IS_DVR;
            b1->flags |= VNET_BUFFER_F_IS_DVR;

            vlib_buffer_advance(b0, -len0);
            vlib_buffer_advance(b1, -len1);

            vnet_feature_arc_start (lm->output_feature_arc_index,
                                    dd0->dd_sw_if_index, &next0, b0);
            vnet_feature_arc_start (lm->output_feature_arc_index,
                                    dd1->dd_sw_if_index, &next1, b1);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = dd0->dd_sw_if_index;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = dd1->dd_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            next0, next1);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            const dvr_dpo_t * dd0;
            vlib_buffer_t * b0;
            u32 bi0, ddi0;
            u32 next0;
            u8 len0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;
            next0 = 0;

            b0 = vlib_get_buffer (vm, bi0);

            ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            dd0 = dvr_dpo_get(ddi0);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;

            /*
             * rewind the buffer back to the start of its ethernet header
             */
            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            vnet_buffer(b0)->l2.l2_len =
                vnet_buffer(b0)->ip.save_rewrite_length =
                    len0;
            b0->flags |= VNET_BUFFER_F_IS_DVR;
            vlib_buffer_advance(b0, -len0);

            /*
             * start processing the ipX output features
             */
            vnet_feature_arc_start(lm->output_feature_arc_index,
                                   dd0->dd_sw_if_index, &next0, b0);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = dd0->dd_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0,
                                            next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
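
/*
 * Worked example of the rewind above (illustrative numbers only): with an
 * untagged 14-byte ethernet header and 'current' pointing at the IP header,
 * len0 = 14, l2.l2_len and ip.save_rewrite_length are both set to 14, and
 * vlib_buffer_advance(b0, -14) moves 'current' back so that the ip4/ip6
 * output feature arc, and ultimately the l2-output reinject, see the
 * packet from its L2 header.
 */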

static u8 *
format_dvr_dpo_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    dvr_dpo_trace_t * t = va_arg (*args, dvr_dpo_trace_t *);
    u32 indent = format_get_indent (s);
    s = format (s, "%U sw_if_index:%d",
                format_white_space, indent,
                t->sw_if_index);
    return s;
}

static uword
ip4_dvr_dpo (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    return (dvr_dpo_inline(vm, node, from_frame, 0));
}

static uword
ip6_dvr_dpo (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    return (dvr_dpo_inline(vm, node, from_frame, 1));
}

VLIB_REGISTER_NODE (ip4_dvr_dpo_node) = {
    .function = ip4_dvr_dpo,
    .name = "ip4-dvr-dpo",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,
    .sibling_of = "ip4-rewrite",
};
VLIB_REGISTER_NODE (ip6_dvr_dpo_node) = {
    .function = ip6_dvr_dpo,
    .name = "ip6-dvr-dpo",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,
    .sibling_of = "ip6-rewrite",
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_dpo_node, ip4_dvr_dpo)
VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_dpo_node, ip6_dvr_dpo)

typedef enum dvr_reinject_next_t_
{
    DVR_REINJECT_OUTPUT = 0,
} dvr_reinject_next_t;

always_inline uword
dvr_reinject_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            dvr_reinject_next_t next0, next1;
            vlib_buffer_t *b0, *b1;
            u32 bi0, bi1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            if (b0->flags & VNET_BUFFER_F_IS_DVR)
                next0 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next(&next0, b0);

            if (b1->flags & VNET_BUFFER_F_IS_DVR)
                next1 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next(&next1, b1);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = vnet_buffer(b1)->sw_if_index[VLIB_TX];
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            next0, next1);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            dvr_reinject_next_t next0;
            vlib_buffer_t * b0;
            u32 bi0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            if (b0->flags & VNET_BUFFER_F_IS_DVR)
                next0 = DVR_REINJECT_OUTPUT;
            else
                vnet_feature_next(&next0, b0);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                dvr_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
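
/*
 * Packet path sketch (informational, roughly): a route resolving via a DVR
 * DPO is forwarded as
 *
 *   ip4-lookup -> ip4-dvr-dpo -> ip4-output feature arc ->
 *   ip4-dvr-reinject -> l2-output
 *
 * Buffers that do not carry VNET_BUFFER_F_IS_DVR were not sent by the
 * dvr-dpo node, so the reinject node simply hands them to the next feature
 * on the output arc.
 */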

static uword
ip4_dvr_reinject (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
    return (dvr_reinject_inline(vm, node, from_frame));
}

static uword
ip6_dvr_reinject (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
    return (dvr_reinject_inline(vm, node, from_frame));
}

VLIB_REGISTER_NODE (ip4_dvr_reinject_node) = {
    .function = ip4_dvr_reinject,
    .name = "ip4-dvr-reinject",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,

    .n_next_nodes = 1,
    .next_nodes = {
        [DVR_REINJECT_OUTPUT] = "l2-output",
    },
};

VLIB_REGISTER_NODE (ip6_dvr_reinject_node) = {
    .function = ip6_dvr_reinject,
    .name = "ip6-dvr-reinject",
    .vector_size = sizeof (u32),
    .format_trace = format_dvr_dpo_trace,

    .n_next_nodes = 1,
    .next_nodes = {
        [DVR_REINJECT_OUTPUT] = "l2-output",
    },
};

VNET_FEATURE_INIT (ip4_dvr_reinject_feat_node, static) =
{
    .arc_name = "ip4-output",
    .node_name = "ip4-dvr-reinject",
    .runs_after = VNET_FEATURES ("nat44-in2out-output",
                                 "acl-plugin-out-ip4-fa"),
};
VNET_FEATURE_INIT (ip6_dvr_reinject_feat_node, static) =
{
    .arc_name = "ip6-output",
    .node_name = "ip6-dvr-reinject",
    .runs_after = VNET_FEATURES ("acl-plugin-out-ip6-fa"),
};
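
/*
 * Note (an inference from the constraints above): running the reinject
 * after the NAT and ACL output features means the frame handed to
 * l2-output reflects whatever those features did on the ip4/ip6 output
 * arc.
 */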

VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_reinject_node, ip4_dvr_reinject)
VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_reinject_node, ip6_dvr_reinject)