/*
 * l2_output.c : layer 2 output packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vlib/cli.h>

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_output.h>


// Feature graph node names
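// The name strings are generated from the foreach_l2output_feat X-macro
// (declared with the other L2 output feature definitions in l2_output.h),
// so they stay in the same order as the L2OUTPUT_FEAT_* bit definitions.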
static char * l2output_feat_names[] = {
#define _(sym,name) name,
  foreach_l2output_feat
#undef _
};

char **l2output_get_feat_names(void) {
  return l2output_feat_names;
}

l2output_main_t l2output_main;

typedef struct {
  /* per-pkt trace data */
  u8 src[6];
  u8 dst[6];
  u32 sw_if_index;
} l2output_trace_t;

/* packet trace format function */
static u8 * format_l2output_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2output_trace_t * t = va_arg (*args, l2output_trace_t *);

  s = format (s, "l2-output: sw_if_index %d dst %U src %U",
              t->sw_if_index,
              format_ethernet_address, t->dst,
              format_ethernet_address, t->src);
  return s;
}


static char * l2output_error_strings[] = {
#define _(sym,string) string,
  foreach_l2output_error
#undef _
};

// Return 0 if split horizon check passes, otherwise return non-zero
// Packets should not be transmitted out an interface with the same
// split-horizon group as the input interface, except if the shg is 0
// in which case the check always passes.
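// For example (shg1 = output interface group, shg2 = packet's input group):
// (0,0), (0,5) and (3,5) all pass; (5,5) is a violation.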
static_always_inline u32
split_horizon_violation (u8 shg1, u8 shg2)
{
  if (PREDICT_TRUE (shg1 == 0)) {
    return 0;
  } else {
    return shg1 == shg2;
  }
}


static vlib_node_registration_t l2output_node;

static uword
l2output_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  l2output_next_t next_index;
  l2output_main_t * msm = &l2output_main;
  vlib_node_t *n = vlib_get_node (vm, l2output_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;
  u32 cached_sw_if_index;
  u32 cached_next_index;

  /* Invalidate cache */
  cached_sw_if_index = ~0;
  cached_next_index = ~0;      /* warning be gone */
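  // (l2_output_dispatch uses these as a one-entry cache of the most recent
  // sw_if_index -> next-index mapping, so back-to-back packets bound for the
  // same output interface can skip the full lookup.)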

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors; /* number of packets to process */
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      /* get space to enqueue frame to graph node "next_index" */
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 6 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;
          ethernet_header_t * h0, * h1;
          l2_output_config_t * config0, * config1;
          u32 feature_bitmap0, feature_bitmap1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, * p4, * p5;
            u32 sw_if_index2, sw_if_index3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            // Prefetch the buffer header for the N+2 loop iteration
            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            // Note: no need to prefetch packet data. This node doesn't reference it.

            // Prefetch the output interface config for the N+1 loop iteration
            // This depends on the buffer header above
            sw_if_index2 = vnet_buffer(p2)->sw_if_index[VLIB_TX];
            sw_if_index3 = vnet_buffer(p3)->sw_if_index[VLIB_TX];
            CLIB_PREFETCH (&msm->configs[sw_if_index2], CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (&msm->configs[sw_if_index3], CLIB_CACHE_LINE_BYTES, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          /* bi is "buffer index", b is pointer to the buffer */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* TX interface handles */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              h0 = vlib_buffer_get_current (b0);
              h1 = vlib_buffer_get_current (b1);
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2output_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  clib_memcpy(t->src, h0->src_address, 6);
                  clib_memcpy(t->dst, h0->dst_address, 6);
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2output_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  clib_memcpy(t->src, h1->src_address, 6);
                  clib_memcpy(t->dst, h1->dst_address, 6);
                }
            }

          em->counters[node_counter_base_index + L2OUTPUT_ERROR_L2OUTPUT] += 2;

          // Get config for the output interface
          config0 = vec_elt_at_index(msm->configs, sw_if_index0);
          config1 = vec_elt_at_index(msm->configs, sw_if_index1);

          // Get features from the config
          // TODO: mask out any non-applicable features
          feature_bitmap0 = config0->feature_bitmap;
          feature_bitmap1 = config1->feature_bitmap;

          // Determine next node
          l2_output_dispatch (msm->vlib_main,
                              msm->vnet_main,
                              node,
                              l2output_node.index,
                              &cached_sw_if_index,
                              &cached_next_index,
                              &msm->next_nodes,
                              b0,
                              sw_if_index0,
                              feature_bitmap0,
                              &next0);

          l2_output_dispatch (msm->vlib_main,
                              msm->vnet_main,
                              node,
                              l2output_node.index,
                              &cached_sw_if_index,
                              &cached_next_index,
                              &msm->next_nodes,
                              b1,
                              sw_if_index1,
                              feature_bitmap1,
                              &next1);

          // Perform output vlan tag rewrite and the pre-vtr EFP filter check.
          // The EFP Filter only needs to be run if there is an output VTR
          // configured. The flag for the post-vtr EFP Filter node is used
          // to trigger the pre-vtr check as well.

          if (PREDICT_FALSE (config0->output_vtr.push_and_pop_bytes)) {
            // Perform pre-vtr EFP filter check if configured
            u32 failed1 = (feature_bitmap0 & L2OUTPUT_FEAT_EFP_FILTER) &&
                          (l2_efp_filter_process(b0, &(config0->input_vtr)));
            u32 failed2 = l2_vtr_process(b0, &(config0->output_vtr));

            if (PREDICT_FALSE (failed1 | failed2)) {
              next0 = L2OUTPUT_NEXT_DROP;
              if (failed2) {
                b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
              }
              if (failed1) {
                b0->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
              }
            }
          }

          if (PREDICT_FALSE (config1->output_vtr.push_and_pop_bytes)) {
            // Perform pre-vtr EFP filter check if configured
            u32 failed1 = (feature_bitmap1 & L2OUTPUT_FEAT_EFP_FILTER) &&
                          (l2_efp_filter_process(b1, &(config1->input_vtr)));
            u32 failed2 = l2_vtr_process(b1, &(config1->output_vtr));

            if (PREDICT_FALSE (failed1 | failed2)) {
              next1 = L2OUTPUT_NEXT_DROP;
              if (failed2) {
                b1->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
              }
              if (failed1) {
                b1->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
              }
            }
          }

          // Perform the split horizon check
          // The check can only fail for non-zero shg's
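          // (config0->shg + config1->shg is non-zero iff at least one output
          // interface has a non-zero group, so the common case of shg 0 on
          // both interfaces skips the per-packet checks entirely.)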
          if (PREDICT_FALSE (config0->shg + config1->shg)) {
            // one of the checks might fail, check both
            if (split_horizon_violation (config0->shg, vnet_buffer(b0)->l2.shg)) {
              next0 = L2OUTPUT_NEXT_DROP;
              b0->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
            }
            if (split_horizon_violation (config1->shg, vnet_buffer(b1)->l2.shg)) {
              next1 = L2OUTPUT_NEXT_DROP;
              b1->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
            }
          }

          /* verify speculative enqueues, maybe switch current next frame */
          /* if next0==next1==next_index then nothing special needs to be done */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;
          ethernet_header_t * h0;
          l2_output_config_t *config0;
          u32 feature_bitmap0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
            l2output_trace_t *t =
              vlib_add_trace (vm, node, b0, sizeof (*t));
            t->sw_if_index = sw_if_index0;
            h0 = vlib_buffer_get_current (b0);
            clib_memcpy(t->src, h0->src_address, 6);
            clib_memcpy(t->dst, h0->dst_address, 6);
          }

          em->counters[node_counter_base_index + L2OUTPUT_ERROR_L2OUTPUT] += 1;

          // Get config for the output interface
          config0 = vec_elt_at_index(msm->configs, sw_if_index0);

          // Get features from the config
          // TODO: mask out any non-applicable features
          feature_bitmap0 = config0->feature_bitmap;

          // Determine next node
          l2_output_dispatch (msm->vlib_main,
                              msm->vnet_main,
                              node,
                              l2output_node.index,
                              &cached_sw_if_index,
                              &cached_next_index,
                              &msm->next_nodes,
                              b0,
                              sw_if_index0,
                              feature_bitmap0,
                              &next0);

          // Perform output vlan tag rewrite and the pre-vtr EFP filter check.
          // The EFP Filter only needs to be run if there is an output VTR
          // configured. The flag for the post-vtr EFP Filter node is used
          // to trigger the pre-vtr check as well.

          if (config0->output_vtr.push_and_pop_bytes) {
            // Perform pre-vtr EFP filter check if configured
            u32 failed1 = (feature_bitmap0 & L2OUTPUT_FEAT_EFP_FILTER) &&
                          (l2_efp_filter_process(b0, &(config0->input_vtr)));
            u32 failed2 = l2_vtr_process(b0, &(config0->output_vtr));

            if (PREDICT_FALSE (failed1 | failed2)) {
              next0 = L2OUTPUT_NEXT_DROP;
              if (failed2) {
                b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
              }
              if (failed1) {
                b0->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
              }
            }
          }

          // Perform the split horizon check
          if (PREDICT_FALSE (split_horizon_violation (config0->shg, vnet_buffer(b0)->l2.shg))) {
            next0 = L2OUTPUT_NEXT_DROP;
            b0->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
          }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}


VLIB_REGISTER_NODE (l2output_node,static) = {
  .function = l2output_node_fn,
  .name = "l2-output",
  .vector_size = sizeof (u32),
  .format_trace = format_l2output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2output_error_strings),
  .error_strings = l2output_error_strings,

  .n_next_nodes = L2OUTPUT_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
        [L2OUTPUT_NEXT_DROP] = "error-drop",
        [L2OUTPUT_NEXT_DEL_TUNNEL] = "l2-output-del-tunnel",
  },
};


#define foreach_l2output_del_tunnel_error \
_(DROP, "L2 output to deleted tunnel")

static char * l2output_del_tunnel_error_strings[] = {
#define _(sym,string) string,
  foreach_l2output_del_tunnel_error
#undef _
};

typedef enum {
#define _(sym,str) L2OUTPUT_DEL_TUNNEL_ERROR_##sym,
  foreach_l2output_del_tunnel_error
#undef _
  L2OUTPUT_DEL_TUNNEL_N_ERROR,
} l2output_del_tunnel_error_t;


// Output node for tunnels which were in L2 bridge domains but have been
// deleted. On deletion of any tunnel which was on an L2 BD, its entry in
// the l2_output_main table next_nodes.output_node_index_vec[sw_if_index]
// MUST be set to the value of L2OUTPUT_NEXT_DEL_TUNNEL. Thus, if there
// are stale entries in the L2FIB for this tunnel sw_if_index, l2-output
// will send packets for this sw_if_index to the l2-output-del-tunnel
// node, which just sets the proper drop reason before sending the packets
// to the error-drop node. That way, stale L2FIB entries for deleted
// tunnels cannot cause packet or memory corruption.
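//
// E.g. the tunnel deletion path is expected to do something like the
// following (a sketch only; the actual call site lives with the tunnel code):
//   l2output_main.next_nodes.output_node_index_vec[sw_if_index] =
//     L2OUTPUT_NEXT_DEL_TUNNEL;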
static vlib_node_registration_t l2output_del_tunnel_node;

static uword
l2output_del_tunnel_node_fn (vlib_main_t * vm,
                             vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  l2output_next_t next_index = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors; // number of packets to process

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      // get space to enqueue frame to graph node "next_index"
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;

          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b0->error = node->errors[L2OUTPUT_DEL_TUNNEL_ERROR_DROP];
          b1->error = node->errors[L2OUTPUT_DEL_TUNNEL_ERROR_DROP];
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          b0 = vlib_get_buffer (vm, bi0);
          b0->error = node->errors[L2OUTPUT_DEL_TUNNEL_ERROR_DROP];
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (l2output_del_tunnel_node,static) = {
  .function = l2output_del_tunnel_node_fn,
  .name = "l2-output-del-tunnel",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2output_del_tunnel_error_strings),
  .error_strings = l2output_del_tunnel_error_strings,

  .n_next_nodes = 1,

  /* edit / add dispositions here */
  .next_nodes = {
        [0] = "error-drop",
  },
};


VLIB_NODE_FUNCTION_MULTIARCH (l2output_node, l2output_node_fn)

clib_error_t *l2output_init (vlib_main_t *vm)
{
  l2output_main_t * mp = &l2output_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main();

  // Create the config vector
  vec_validate(mp->configs, 100);
  // Until we hook up the CLI config, just create 100 sw interface entries and zero them

  // Initialize the feature next-node indexes
  feat_bitmap_init_next_nodes(vm,
                              l2output_node.index,
                              L2OUTPUT_N_FEAT,
                              l2output_get_feat_names(),
                              mp->next_nodes.feat_next_node_index);

  // Initialize the output node mapping table
  l2output_init_output_node_vec(&mp->next_nodes.output_node_index_vec);

  return 0;
}

VLIB_INIT_FUNCTION (l2output_init);

typedef struct {
  u32 node_index;
  u32 sw_if_index;
} output_node_mapping_rpc_args_t;

#if DPDK > 0
static void output_node_rpc_callback (output_node_mapping_rpc_args_t * a);

static void output_node_mapping_send_rpc
(u32 node_index,
 u32 sw_if_index)
{
  output_node_mapping_rpc_args_t args;
  void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);

  args.node_index = node_index;
  args.sw_if_index = sw_if_index;

  vl_api_rpc_call_main_thread (output_node_rpc_callback,
                               (u8 *) &args, sizeof (args));
}
#endif


// Create a mapping in the next node mapping table for the given sw_if_index
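// In the DPDK configuration, worker threads do not add the graph arc
// themselves: they hand the request to the main thread via
// output_node_mapping_send_rpc() and return L2OUTPUT_NEXT_DROP, so packets
// that arrive before the mapping is installed are simply dropped.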
u32 l2output_create_output_node_mapping (
                            vlib_main_t * vlib_main,
                            vnet_main_t * vnet_main,
                            u32 node_index,  // index of current node
                            u32 * output_node_index_vec,
                            u32 sw_if_index) {

  u32 next;  // index of next graph node
  vnet_hw_interface_t *hw0;
  u32 *node;

  hw0 = vnet_get_sup_hw_interface (vnet_main, sw_if_index);

#if DPDK > 0
  uword cpu_number;

  cpu_number = os_get_cpu_number();

  if (cpu_number)
    {
      u32 oldflags;

      oldflags = __sync_fetch_and_or(&hw0->flags,
                                     VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED);

      if ((oldflags & VNET_HW_INTERFACE_FLAG_L2OUTPUT_MAPPED))
        return L2OUTPUT_NEXT_DROP;

      output_node_mapping_send_rpc (node_index, sw_if_index);
      return L2OUTPUT_NEXT_DROP;
    }
#endif

  // dynamically create graph node arc
  next = vlib_node_add_next (vlib_main,
                             node_index,
                             hw0->output_node_index);

  // Initialize vector with the mapping

  node = vec_elt_at_index(output_node_index_vec, sw_if_index);
  *node = next;

  return next;
}

#if DPDK > 0
void output_node_rpc_callback (output_node_mapping_rpc_args_t *a)
{
  vlib_main_t * vm = vlib_get_main();
  vnet_main_t * vnm = vnet_get_main();
  l2output_main_t * mp = &l2output_main;

  (void) l2output_create_output_node_mapping
    (vm, vnm, a->node_index, mp->next_nodes.output_node_index_vec,
     a->sw_if_index);
}
#endif

// Get a pointer to the config for the given interface
l2_output_config_t * l2output_intf_config (u32 sw_if_index)
{
  l2output_main_t * mp = &l2output_main;

  vec_validate(mp->configs, sw_if_index);
  return vec_elt_at_index(mp->configs, sw_if_index);
}

// Enable (or disable) the feature in the bitmap for the given interface
void l2output_intf_bitmap_enable (u32 sw_if_index,
                                  u32 feature_bitmap,
                                  u32 enable)
{
  l2output_main_t * mp = &l2output_main;
  l2_output_config_t *config;

  vec_validate(mp->configs, sw_if_index);
  config = vec_elt_at_index(mp->configs, sw_if_index);

  if (enable) {
    config->feature_bitmap |= feature_bitmap;
  } else {
    config->feature_bitmap &= ~feature_bitmap;
  }
}
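
// Usage sketch (hypothetical caller, not part of this file): a feature such
// as the EFP filter could be switched on for an interface with
//   l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_EFP_FILTER, 1);
// and off again by passing enable = 0.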