/*
 * l2_flood.c : layer 2 flooding
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vlib/cli.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/l2/l2_fib.h>

#include <vppinfra/error.h>
#include <vppinfra/hash.h>

/**
 * @file
 * @brief Ethernet Flooding.
 *
 * Flooding uses the packet replication infrastructure to send a copy of the
 * packet to each member interface. Logically the replication infrastructure
 * expects two graph nodes: a prep node that initiates replication and sends
 * the packet to the first destination, and a recycle node that is passed the
 * packet after it has been transmitted.
 *
 * To decrease the amount of code, l2 flooding implements both functions in
 * the same graph node. This node can tell if it is being called as the
 * "prep" or the "recycle" using replication_is_recycled().
 */
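
/*
 * (In the implementation below the replication is done inline: the node
 * clones the packet once per member with vlib_buffer_clone() and enqueues
 * each clone directly, so no separate recycle pass appears in this file.)
 */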


typedef struct
{

  /* Next nodes for each feature */
  u32 feat_next_node_index[32];

  /* next node index for the L3 input node of each ethertype */
  next_by_ethertype_t l3_next;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

  /* per-thread scratch vectors of cloned-packet indices and flood members */
  u32 **clones;
  l2_flood_member_t ***members;
} l2flood_main_t;

typedef struct
{
  u8 src[6];
  u8 dst[6];
  u32 sw_if_index;
  u16 bd_index;
} l2flood_trace_t;


/* packet trace format function */
static u8 *
format_l2flood_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2flood_trace_t *t = va_arg (*args, l2flood_trace_t *);

  s = format (s, "l2-flood: sw_if_index %d dst %U src %U bd_index %d",
              t->sw_if_index,
              format_ethernet_address, t->dst,
              format_ethernet_address, t->src, t->bd_index);
  return s;
}

l2flood_main_t l2flood_main;

static vlib_node_registration_t l2flood_node;

#define foreach_l2flood_error                                   \
_(L2FLOOD,        "L2 flood packets")                           \
_(REPL_FAIL,      "L2 replication failures")                    \
_(NO_MEMBERS,     "L2 replication complete")                    \
_(BVI_BAD_MAC,    "BVI L3 mac mismatch")                        \
_(BVI_ETHERTYPE,  "BVI packet with unhandled ethertype")

typedef enum
{
#define _(sym,str) L2FLOOD_ERROR_##sym,
  foreach_l2flood_error
#undef _
  L2FLOOD_N_ERROR,
} l2flood_error_t;

static char *l2flood_error_strings[] = {
#define _(sym,string) string,
  foreach_l2flood_error
#undef _
};

typedef enum
{
  L2FLOOD_NEXT_L2_OUTPUT,
  L2FLOOD_NEXT_DROP,
  L2FLOOD_N_NEXT,
} l2flood_next_t;

/*
 * Perform flooding on one packet
 *
 * Due to the way BVI processing can modify the packet, the BVI interface
 * (if present) must be processed last in the replication. The member vector
 * is arranged so that the BVI interface is always the first element.
 * Flooding walks the vector in reverse.
 *
 * BVI processing causes the packet to go to L3 processing. This strips the
 * L2 header, which is fine because the replication infrastructure restores
 * it. However, L3 processing can trigger larger changes to the packet. For
 * example, an ARP request could be turned into an ARP reply, or an ICMP
 * request into an ICMP reply. If BVI processing were not performed last,
 * the modified packet would be replicated to the remaining members.
 */
static uword
l2flood_node_fn (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  l2flood_next_t next_index;
  l2flood_main_t *msm = &l2flood_main;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

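  /*
   * Standard vlib dispatch loop: pull each packet from the input frame,
   * clone it once per flood member, and enqueue the clones, cycling
   * next-frames as they fill.
   */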
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u16 n_clones, n_cloned, clone0;
          l2_bridge_domain_t *bd_config;
          u32 sw_if_index0, bi0, ci0;
          l2_flood_member_t *member;
          vlib_buffer_t *b0, *c0;
          u16 next0;
          u8 in_shg;
          i32 mi;

          /* Pull the next packet from the input frame */
          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          next0 = L2FLOOD_NEXT_L2_OUTPUT;

          b0 = vlib_get_buffer (vm, bi0);

          /* Get config for the bridge domain interface */
          bd_config = vec_elt_at_index (l2input_main.bd_configs,
                                        vnet_buffer (b0)->l2.bd_index);
          in_shg = vnet_buffer (b0)->l2.shg;
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          vec_validate (msm->members[thread_index],
                        vec_len (bd_config->members));

          vec_reset_length (msm->members[thread_index]);

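          /*
           * Walk the bridge-domain members in reverse so that the BVI
           * member, which the bridge domain keeps at index 0, lands last
           * in the per-thread vector and is therefore replicated last
           * (see the comment above this function).
           */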
          /* Find the members that pass the reflection and SHG checks */
          for (mi = bd_config->flood_count - 1; mi >= 0; mi--)
            {
              member = &bd_config->members[mi];
              if ((member->sw_if_index != sw_if_index0) &&
                  (!in_shg || (member->shg != in_shg)))
                {
                  vec_add1 (msm->members[thread_index], member);
                }
            }

          n_clones = vec_len (msm->members[thread_index]);

          if (0 == n_clones)
            {
              /* No members to flood to */
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;

              b0->error = node->errors[L2FLOOD_ERROR_NO_MEMBERS];
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               bi0, L2FLOOD_NEXT_DROP);
              continue;
            }

          vec_validate (msm->clones[thread_index], n_clones);
          vec_reset_length (msm->clones[thread_index]);

          /*
           * the header offset needs to be large enough to incorporate
           * all the L3 headers that could be touched when doing BVI
           * processing. So take the current l2 length plus a UDP header
           * and 2 * IPv6 headers (for tunnel encap)
           */
          n_cloned = vlib_buffer_clone (vm, bi0,
                                        msm->clones[thread_index],
                                        n_clones,
                                        (vnet_buffer (b0)->l2.l2_len +
                                         sizeof (udp_header_t) +
                                         2 * sizeof (ip6_header_t)));
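
          /*
           * Note: the clones share the original packet data; the offset
           * above is private headroom so each copy can rewrite its own
           * headers independently. If the buffer pool runs short,
           * n_cloned can come back smaller than the number of members.
           */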

          if (PREDICT_FALSE (0 == n_cloned))
            {
              /* no clones available at all: drop the original packet
               * rather than indexing an empty clone vector below */
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;

              b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               bi0, L2FLOOD_NEXT_DROP);
              continue;
            }
          else if (PREDICT_FALSE (n_cloned != n_clones))
            {
              /* flood to as many members as clones were available for */
              b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
            }

          /*
           * for all but the last clone, these are not BVI bound
           */
          for (clone0 = 0; clone0 < n_cloned - 1; clone0++)
            {
              member = msm->members[thread_index][clone0];
              ci0 = msm->clones[thread_index][clone0];
              c0 = vlib_get_buffer (vm, ci0);

              to_next[0] = ci0;
              to_next += 1;
              n_left_to_next -= 1;

              if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
                                 (b0->flags & VLIB_BUFFER_IS_TRACED)))
                {
                  ethernet_header_t *h0;
                  l2flood_trace_t *t;

                  if (c0 != b0)
                    vlib_buffer_copy_trace_flag (vm, b0, ci0);

                  t = vlib_add_trace (vm, node, c0, sizeof (*t));
                  h0 = vlib_buffer_get_current (c0);
                  t->sw_if_index = sw_if_index0;
                  t->bd_index = vnet_buffer (c0)->l2.bd_index;
                  clib_memcpy (t->src, h0->src_address, 6);
                  clib_memcpy (t->dst, h0->dst_address, 6);
                }

              /* Do normal L2 forwarding */
              vnet_buffer (c0)->sw_if_index[VLIB_TX] = member->sw_if_index;

              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               ci0, next0);
              if (PREDICT_FALSE (0 == n_left_to_next))
                {
                  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                  vlib_get_next_frame (vm, node, next_index,
                                       to_next, n_left_to_next);
                }
            }

          /*
           * the last clone that might go to a BVI
           */
          member = msm->members[thread_index][clone0];
          ci0 = msm->clones[thread_index][clone0];
          c0 = vlib_get_buffer (vm, ci0);

          to_next[0] = ci0;
          to_next += 1;
          n_left_to_next -= 1;

          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
                             (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ethernet_header_t *h0;
              l2flood_trace_t *t;

              if (c0 != b0)
                vlib_buffer_copy_trace_flag (vm, b0, ci0);

              t = vlib_add_trace (vm, node, c0, sizeof (*t));
              h0 = vlib_buffer_get_current (c0);
              t->sw_if_index = sw_if_index0;
              t->bd_index = vnet_buffer (c0)->l2.bd_index;
              clib_memcpy (t->src, h0->src_address, 6);
              clib_memcpy (t->dst, h0->dst_address, 6);
            }

          /* Forward packet to the current member */
          if (PREDICT_FALSE (member->flags & L2_FLOOD_MEMBER_BVI))
            {
              /* Do BVI processing */
              u32 rc;
              rc = l2_to_bvi (vm,
                              msm->vnet_main,
                              c0, member->sw_if_index, &msm->l3_next, &next0);

              if (PREDICT_FALSE (rc != TO_BVI_ERR_OK))
                {
                  if (rc == TO_BVI_ERR_BAD_MAC)
                    {
                      c0->error = node->errors[L2FLOOD_ERROR_BVI_BAD_MAC];
                    }
                  else if (rc == TO_BVI_ERR_ETHERTYPE)
                    {
                      c0->error = node->errors[L2FLOOD_ERROR_BVI_ETHERTYPE];
                    }
                  next0 = L2FLOOD_NEXT_DROP;
                }
            }
          else
            {
              /* Do normal L2 forwarding */
              vnet_buffer (c0)->sw_if_index[VLIB_TX] = member->sw_if_index;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           ci0, next0);
          if (PREDICT_FALSE (0 == n_left_to_next))
            {
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index,
                                   to_next, n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               L2FLOOD_ERROR_L2FLOOD, frame->n_vectors);

  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2flood_node,static) = {
  .function = l2flood_node_fn,
  .name = "l2-flood",
  .vector_size = sizeof (u32),
  .format_trace = format_l2flood_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2flood_error_strings),
  .error_strings = l2flood_error_strings,

  .n_next_nodes = L2FLOOD_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [L2FLOOD_NEXT_L2_OUTPUT] = "l2-output",
    [L2FLOOD_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (l2flood_node, l2flood_node_fn)
/* *INDENT-ON* */

clib_error_t *
l2flood_init (vlib_main_t * vm)
{
  l2flood_main_t *mp = &l2flood_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main ();

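  /*
   * Per-thread scratch vectors; vec_validate sizes the outer vectors to
   * vlib_num_workers () + 1 so the main thread (thread_index 0) gets a
   * slot as well.
   */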
  vec_validate (mp->clones, vlib_num_workers ());
  vec_validate (mp->members, vlib_num_workers ());

  /* Initialize the feature next-node indexes */
  feat_bitmap_init_next_nodes (vm,
                               l2flood_node.index,
                               L2INPUT_N_FEAT,
                               l2input_get_feat_names (),
                               mp->feat_next_node_index);

  return NULL;
}

VLIB_INIT_FUNCTION (l2flood_init);



/** Add the L3 input node for this ethertype to the next nodes structure. */
void
l2flood_register_input_type (vlib_main_t * vm,
                             ethernet_type_t type, u32 node_index)
{
  l2flood_main_t *mp = &l2flood_main;
  u32 next_index;

  next_index = vlib_node_add_next (vm, l2flood_node.index, node_index);

  next_by_ethertype_register (&mp->l3_next, type, next_index);
}
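
/*
 * For example, an L3 input node could be hooked up like this
 * (hypothetical caller shown for illustration; the real registrations
 * live with the respective L3 input code):
 *
 *   l2flood_register_input_type (vm, ETHERNET_TYPE_IP4,
 *                                ip4_input_node.index);
 */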


/**
 * Set subinterface flood enable/disable.
 * The CLI format is:
 *    set interface l2 flood <interface> [disable]
 */
static clib_error_t *
int_flood (vlib_main_t * vm,
           unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  clib_error_t *error = 0;
  u32 sw_if_index;
  u32 enable;

  if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
    {
      error = clib_error_return (0, "unknown interface `%U'",
                                 format_unformat_error, input);
      goto done;
    }

  enable = 1;
  if (unformat (input, "disable"))
    {
      enable = 0;
    }

  /* set the interface flag */
  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FLOOD, enable);

done:
  return error;
}

/*?
 * Layer 2 flooding can be enabled and disabled on each
 * interface and on each bridge-domain. Use this command to
 * manage interfaces. It is enabled by default.
 *
 * @cliexpar
 * Example of how to enable flooding:
 * @cliexcmd{set interface l2 flood GigabitEthernet0/8/0}
 * Example of how to disable flooding:
 * @cliexcmd{set interface l2 flood GigabitEthernet0/8/0 disable}
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (int_flood_cli, static) = {
  .path = "set interface l2 flood",
  .short_help = "set interface l2 flood <interface> [disable]",
  .function = int_flood,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */