blob: 93e69db31e9125e01526ee53b8c3f640a6f9de25 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * l2_fwd.c : layer 2 forwarding using l2fib
3 *
4 * Copyright (c) 2013 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vlib/vlib.h>
19#include <vnet/vnet.h>
20#include <vnet/pg/pg.h>
21#include <vnet/ethernet/ethernet.h>
22#include <vlib/cli.h>
23
24#include <vnet/l2/l2_input.h>
25#include <vnet/l2/l2_bvi.h>
26#include <vnet/l2/l2_fwd.h>
27#include <vnet/l2/l2_fib.h>
Neale Ranns3b81a1e2018-09-06 09:50:26 -070028#include <vnet/l2/feat_bitmap.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070029
30#include <vppinfra/error.h>
31#include <vppinfra/hash.h>
32#include <vppinfra/sparse_vec.h>
33
34
Billy McFall22aa3e92016-09-09 08:46:40 -040035/**
36 * @file
37 * @brief Ethernet Forwarding.
38 *
39 * Code in this file handles forwarding Layer 2 packets. This file calls
40 * the FIB lookup, packet learning and the packet flooding as necessary.
41 * Packet is then sent to the next graph node.
42 */
43
/* Per-process state for the l2-fwd graph node. */
typedef struct
{

  /* Hash table: pointer to the shared L2 FIB bihash (see get_mac_table ()) */
  BVT (clib_bihash) * mac_table;

  /* next node index for the L3 input node of each ethertype */
  next_by_ethertype_t l3_next;

  /* Next nodes for each feature */
  u32 feat_next_node_index[32];

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} l2fwd_main_t;
60
/* Packet trace record captured by the l2-fwd node when tracing is on. */
typedef struct
{
  /* per-pkt trace data */
  u8 src[6];			/* source MAC from the ethernet header */
  u8 dst[6];			/* destination MAC from the ethernet header */
  u32 sw_if_index;		/* RX interface of the packet */
  u16 bd_index;			/* bridge-domain index */
  l2fib_entry_result_t result;	/* L2 FIB lookup result for dst MAC */
} l2fwd_trace_t;
70
/* packet trace format function */
static u8 *
format_l2fwd_trace (u8 * s, va_list * args)
{
  /* First two va_args are the standard trace-format prologue; unused here. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2fwd_trace_t *t = va_arg (*args, l2fwd_trace_t *);

  /* Render the trace record: RX interface, MACs, bridge domain and the
     raw + decoded L2 FIB lookup result. */
  s =
    format (s,
	    "l2-fwd: sw_if_index %d dst %U src %U bd_index %d result [0x%llx, %d] %U",
	    t->sw_if_index, format_ethernet_address, t->dst,
	    format_ethernet_address, t->src, t->bd_index, t->result.raw,
	    t->result.fields.sw_if_index, format_l2fib_entry_result_flags,
	    t->result.fields.flags);
  return s;
}
88
/* The single instance lives in the base binary; CPU-march variant
   translation units only reference it. */
#ifndef CLIB_MARCH_VARIANT
l2fwd_main_t l2fwd_main;
#else
extern l2fwd_main_t l2fwd_main;
#endif

/* Node registration is emitted by the VLIB_REGISTER_NODE macro below. */
extern vlib_node_registration_t l2fwd_node;
Ed Warnickecb9cada2015-12-08 15:45:58 -070096
/* Error/counter table: each _(SYM, "string") pair expands to an
   L2FWD_ERROR_<SYM> enum value and its human-readable counter name. */
#define foreach_l2fwd_error \
_(L2FWD, "L2 forward packets") \
_(FLOOD, "L2 forward misses") \
_(HIT, "L2 forward hits") \
_(BVI_BAD_MAC, "BVI L3 MAC mismatch") \
_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype") \
_(FILTER_DROP, "Filter Mac Drop") \
_(REFLECT_DROP, "Reflection Drop") \
_(STALE_DROP, "Stale entry Drop")

typedef enum
{
#define _(sym,str) L2FWD_ERROR_##sym,
  foreach_l2fwd_error
#undef _
    L2FWD_N_ERROR,
} l2fwd_error_t;

/* Counter name strings, index-aligned with l2fwd_error_t. */
static char *l2fwd_error_strings[] = {
#define _(sym,string) string,
  foreach_l2fwd_error
#undef _
};

/* Next-node dispositions of the l2-fwd node (see .next_nodes below). */
typedef enum
{
  L2FWD_NEXT_L2_OUTPUT,
  L2FWD_NEXT_DROP,
  L2FWD_N_NEXT,
} l2fwd_next_t;
127
/** Forward one packet based on the mac table lookup result.
 *
 * On a FIB hit, sets the TX interface and next node, then applies (in
 * order): stale-entry sequence-number check, reflection check, filter
 * check, and BVI handling.  A miss or a stale entry falls through to
 * the flood path; if no flood-type feature is enabled the packet is
 * dropped with the recorded error.
 */

static_always_inline void
l2fwd_process (vlib_main_t * vm,
	       vlib_node_runtime_t * node,
	       l2fwd_main_t * msm,
	       vlib_error_main_t * em,
	       vlib_buffer_t * b0,
	       u32 sw_if_index0, l2fib_entry_result_t * result0, u16 * next0)
{
  /* all-ones raw result is the "not found" sentinel from the lookup */
  int try_flood = result0->raw == ~0;
  int flood_error;

  if (PREDICT_FALSE (try_flood))
    {
      flood_error = L2FWD_ERROR_FLOOD;
    }
  else
    {
      /* lookup hit, forward packet */
#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_HIT] += 1;
#endif

      vnet_buffer (b0)->sw_if_index[VLIB_TX] = result0->fields.sw_if_index;
      *next0 = L2FWD_NEXT_L2_OUTPUT;
      int l2fib_seq_num_valid = 1;

      /* check l2fib seq num for stale entries */
      if (!l2fib_entry_result_is_set_AGE_NOT (result0))
	{
	  /* Rebuild the expected sequence number from the packet's BD
	     component and the current per-interface component; a mismatch
	     means the FIB entry predates an interface/BD flush. */
	  l2fib_seq_num_t in_sn = {.as_u16 = vnet_buffer (b0)->l2.l2fib_sn };
	  l2fib_seq_num_t expected_sn = {
	    .bd = in_sn.bd,
	    .swif = *l2fib_swif_seq_num (result0->fields.sw_if_index),
	  };
	  l2fib_seq_num_valid =
	    expected_sn.as_u16 == result0->fields.sn.as_u16;
	}

      if (PREDICT_FALSE (!l2fib_seq_num_valid))
	{
	  /* stale entry: treat as a miss, but account it separately */
	  flood_error = L2FWD_ERROR_STALE_DROP;
	  try_flood = 1;
	}
      /* perform reflection check */
      else if (PREDICT_FALSE (sw_if_index0 == result0->fields.sw_if_index))
	{
	  b0->error = node->errors[L2FWD_ERROR_REFLECT_DROP];
	  *next0 = L2FWD_NEXT_DROP;
	}
      /* perform filter check */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_FILTER (result0)))
	{
	  b0->error = node->errors[L2FWD_ERROR_FILTER_DROP];
	  *next0 = L2FWD_NEXT_DROP;
	}
      /* perform BVI check */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_BVI (result0)))
	{
	  /* hand the packet to the L3 path; l2_to_bvi picks the next
	     node from the per-ethertype table */
	  u32 rc;
	  rc = l2_to_bvi (vm,
			  msm->vnet_main,
			  b0,
			  vnet_buffer (b0)->sw_if_index[VLIB_TX],
			  &msm->l3_next, next0);

	  if (PREDICT_FALSE (rc))
	    {
	      if (rc == TO_BVI_ERR_BAD_MAC)
		{
		  b0->error = node->errors[L2FWD_ERROR_BVI_BAD_MAC];
		  *next0 = L2FWD_NEXT_DROP;
		}
	      else if (rc == TO_BVI_ERR_ETHERTYPE)
		{
		  b0->error = node->errors[L2FWD_ERROR_BVI_ETHERTYPE];
		  *next0 = L2FWD_NEXT_DROP;
		}
	    }
	}
    }

  /* flood */
  if (PREDICT_FALSE (try_flood))
    {
      /*
       * lookup miss, so flood which is typically the next feature
       * unless some other feature is inserted before uu_flood
       */
      if (vnet_buffer (b0)->l2.feature_bitmap &
	  (L2INPUT_FEAT_UU_FLOOD |
	   L2INPUT_FEAT_UU_FWD | L2INPUT_FEAT_GBP_FWD))
	{
	  *next0 = vnet_l2_feature_next (b0, msm->feat_next_node_index,
					 L2INPUT_FEAT_FWD);
	}
      else
	{
	  /* Flooding is disabled */
	  b0->error = node->errors[flood_error];
	  *next0 = L2FWD_NEXT_DROP;
	}
    }
}
233
234
/**
 * l2-fwd node dispatch: look up each packet's destination MAC in the
 * L2 FIB and steer it via l2fwd_process().  Packets are handled four
 * at a time with buffer prefetch; do_trace is a compile-time flag so
 * the untraced path pays no tracing cost.
 */
static_always_inline uword
l2fwd_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		   vlib_frame_t * frame, int do_trace)
{
  u32 n_left, *from;
  l2fwd_main_t *msm = &l2fwd_main;
  vlib_node_t *n = vlib_get_node (vm, l2fwd_node.index);
  CLIB_UNUSED (u32 node_counter_base_index) = n->error_heap_index;
  vlib_error_main_t *em = &vm->error_main;
  l2fib_entry_key_t cached_key;
  l2fib_entry_result_t cached_result;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  /* Clear the one-entry cache in case mac table was updated */
  cached_key.raw = ~0;
  cached_result.raw = ~0;

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;	/* number of packets to process */
  vlib_get_buffers (vm, from, bufs, n_left);
  next = nexts;
  b = bufs;

  /* Quad loop: process 4 packets while at least 8 remain so the
     prefetched buffers b[4]..b[7] are always valid. */
  while (n_left >= 8)
    {
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      const ethernet_header_t *h0, *h1, *h2, *h3;
      l2fib_entry_key_t key0, key1, key2, key3;
      l2fib_entry_result_t result0, result1, result2, result3;

      /* Prefetch next iteration. */
      {
	vlib_prefetch_buffer_header (b[4], LOAD);
	vlib_prefetch_buffer_header (b[5], LOAD);
	vlib_prefetch_buffer_header (b[6], LOAD);
	vlib_prefetch_buffer_header (b[7], LOAD);

	CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
      }

      /* RX interface handles */
      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      h0 = vlib_buffer_get_current (b[0]);
      h1 = vlib_buffer_get_current (b[1]);
      h2 = vlib_buffer_get_current (b[2]);
      h3 = vlib_buffer_get_current (b[3]);

#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 4;
#endif
      /* 4-wide FIB lookup on (dst MAC, bd_index); shares the one-entry
         cache across all four lookups */
      /* *INDENT-OFF* */
      l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
		      h0->dst_address, h1->dst_address,
		      h2->dst_address, h3->dst_address,
		      vnet_buffer (b[0])->l2.bd_index,
		      vnet_buffer (b[1])->l2.bd_index,
		      vnet_buffer (b[2])->l2.bd_index,
		      vnet_buffer (b[3])->l2.bd_index,
		      &key0,	/* not used */
		      &key1,	/* not used */
		      &key2,	/* not used */
		      &key3,	/* not used */
		      &result0,
		      &result1,
		      &result2,
		      &result3);
      /* *INDENT-ON* */
      l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
      l2fwd_process (vm, node, msm, em, b[1], sw_if_index1, &result1,
		     next + 1);
      l2fwd_process (vm, node, msm, em, b[2], sw_if_index2, &result2,
		     next + 2);
      l2fwd_process (vm, node, msm, em, b[3], sw_if_index3, &result3,
		     next + 3);

      /* verify speculative enqueues, maybe switch current next frame */
      /* if next0==next1==next_index then nothing special needs to be done */
      if (do_trace)
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
	      t->sw_if_index = sw_if_index0;
	      t->bd_index = vnet_buffer (b[0])->l2.bd_index;
	      clib_memcpy_fast (t->src, h0->src_address, 6);
	      clib_memcpy_fast (t->dst, h0->dst_address, 6);
	      t->result = result0;
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      l2fwd_trace_t *t = vlib_add_trace (vm, node, b[1], sizeof (*t));
	      t->sw_if_index = sw_if_index1;
	      t->bd_index = vnet_buffer (b[1])->l2.bd_index;
	      clib_memcpy_fast (t->src, h1->src_address, 6);
	      clib_memcpy_fast (t->dst, h1->dst_address, 6);
	      t->result = result1;
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      l2fwd_trace_t *t = vlib_add_trace (vm, node, b[2], sizeof (*t));
	      t->sw_if_index = sw_if_index2;
	      t->bd_index = vnet_buffer (b[2])->l2.bd_index;
	      clib_memcpy_fast (t->src, h2->src_address, 6);
	      clib_memcpy_fast (t->dst, h2->dst_address, 6);
	      t->result = result2;
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      l2fwd_trace_t *t = vlib_add_trace (vm, node, b[3], sizeof (*t));
	      t->sw_if_index = sw_if_index3;
	      t->bd_index = vnet_buffer (b[3])->l2.bd_index;
	      clib_memcpy_fast (t->src, h3->src_address, 6);
	      clib_memcpy_fast (t->dst, h3->dst_address, 6);
	      t->result = result3;
	    }
	}

      next += 4;
      b += 4;
      n_left -= 4;
    }

  /* Single loop: drain the remaining packets one at a time. */
  while (n_left > 0)
    {
      u32 sw_if_index0;
      ethernet_header_t *h0;
      l2fib_entry_key_t key0;
      l2fib_entry_result_t result0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];

      h0 = vlib_buffer_get_current (b[0]);

      /* process 1 pkt */
#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 1;
#endif
      l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
		      h0->dst_address, vnet_buffer (b[0])->l2.bd_index, &key0,
		      /* not used */ &result0);
      l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);

      if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->sw_if_index = sw_if_index0;
	  t->bd_index = vnet_buffer (b[0])->l2.bd_index;
	  clib_memcpy_fast (t->src, h0->src_address, 6);
	  clib_memcpy_fast (t->dst, h0->dst_address, 6);
	  t->result = result0;
	}

      /* verify speculative enqueue, maybe switch current next frame */
      next += 1;
      b += 1;
      n_left -= 1;
    }

  /* Hand all packets to their chosen next nodes in one call. */
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}
405
/* Node entry point: select the traced or untraced specialization of
   l2fwd_node_inline based on the node's trace flag. */
VLIB_NODE_FN (l2fwd_node) (vlib_main_t * vm,
			   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    return l2fwd_node_inline (vm, node, frame, 1 /* do_trace */ );
  return l2fwd_node_inline (vm, node, frame, 0 /* do_trace */ );
}
413
/* Graph-node registration for "l2-fwd": internal node with the error
   counters and next-node dispositions defined above. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2fwd_node) = {
  .name = "l2-fwd",
  .vector_size = sizeof (u32),
  .format_trace = format_l2fwd_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2fwd_error_strings),
  .error_strings = l2fwd_error_strings,

  .n_next_nodes = L2FWD_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [L2FWD_NEXT_L2_OUTPUT] = "l2-output",
    [L2FWD_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700433
Neale Rannsc25eb452018-09-12 06:53:03 -0400434#ifndef CLIB_MARCH_VARIANT
/**
 * One-time init for the l2-fwd node: cache the vlib/vnet mains,
 * resolve the per-feature next-node indices, grab the shared L2 FIB
 * pointer, and initialize the per-ethertype L3 dispatch table.
 */
clib_error_t *
l2fwd_init (vlib_main_t * vm)
{
  l2fwd_main_t *mp = &l2fwd_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main ();

  /* Initialize the feature next-node indexes */
  feat_bitmap_init_next_nodes (vm,
			       l2fwd_node.index,
			       L2INPUT_N_FEAT,
			       l2input_get_feat_names (),
			       mp->feat_next_node_index);

  /* init the hash table ptr */
  mp->mac_table = get_mac_table ();

  /* Initialize the next nodes for each ethertype */
  next_by_ethertype_init (&mp->l3_next);

  return 0;
}

VLIB_INIT_FUNCTION (l2fwd_init);
460
461
Chris Luke16bcf7d2016-09-01 14:31:46 -0400462/** Add the L3 input node for this ethertype to the next nodes structure. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700463void
464l2fwd_register_input_type (vlib_main_t * vm,
Dave Barach97d8dc22016-08-15 15:31:15 -0400465 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700466{
Dave Barach97d8dc22016-08-15 15:31:15 -0400467 l2fwd_main_t *mp = &l2fwd_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700468 u32 next_index;
469
Dave Barach97d8dc22016-08-15 15:31:15 -0400470 next_index = vlib_node_add_next (vm, l2fwd_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700471
472 next_by_ethertype_register (&mp->l3_next, type, next_index);
473}
474
475
Dave Barach97d8dc22016-08-15 15:31:15 -0400476/**
Chris Luke16bcf7d2016-09-01 14:31:46 -0400477 * Set subinterface forward enable/disable.
Dave Barach97d8dc22016-08-15 15:31:15 -0400478 * The CLI format is:
479 * set interface l2 forward <interface> [disable]
480 */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700481static clib_error_t *
Dave Barach97d8dc22016-08-15 15:31:15 -0400482int_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700483{
Dave Barach97d8dc22016-08-15 15:31:15 -0400484 vnet_main_t *vnm = vnet_get_main ();
485 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700486 u32 sw_if_index;
487 u32 enable;
488
Dave Barach97d8dc22016-08-15 15:31:15 -0400489 if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700490 {
491 error = clib_error_return (0, "unknown interface `%U'",
Dave Barach97d8dc22016-08-15 15:31:15 -0400492 format_unformat_error, input);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700493 goto done;
494 }
495
496 enable = 1;
Dave Barach97d8dc22016-08-15 15:31:15 -0400497 if (unformat (input, "disable"))
498 {
499 enable = 0;
500 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700501
Dave Barach97d8dc22016-08-15 15:31:15 -0400502 /* set the interface flag */
503 if (l2input_intf_config (sw_if_index)->xconnect)
504 {
505 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_XCONNECT, enable);
506 }
507 else
508 {
509 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FWD, enable);
510 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700511
Dave Barach97d8dc22016-08-15 15:31:15 -0400512done:
Ed Warnickecb9cada2015-12-08 15:45:58 -0700513 return error;
514}
515
/*?
 * Layer 2 unicast forwarding can be enabled and disabled on each
 * interface and on each bridge-domain. Use this command to
 * manage interfaces. It is enabled by default.
 *
 * @cliexpar
 * Example of how to enable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0}
 * Example of how to disable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0 disable}
?*/
/* CLI registration: binds the command path to the int_fwd handler. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (int_fwd_cli, static) = {
  .path = "set interface l2 forward",
  .short_help = "set interface l2 forward <interface> [disable]",
  .function = int_fwd,
};
/* *INDENT-ON* */
534
Neale Rannsc25eb452018-09-12 06:53:03 -0400535#endif
536
Dave Barach97d8dc22016-08-15 15:31:15 -0400537/*
538 * fd.io coding-style-patch-verification: ON
539 *
540 * Local Variables:
541 * eval: (c-set-style "gnu")
542 * End:
543 */