/*
 * l2_fwd.c : layer 2 forwarding using l2fib
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17
18#include <vlib/vlib.h>
19#include <vnet/vnet.h>
20#include <vnet/pg/pg.h>
21#include <vnet/ethernet/ethernet.h>
22#include <vlib/cli.h>
23
24#include <vnet/l2/l2_input.h>
25#include <vnet/l2/l2_bvi.h>
26#include <vnet/l2/l2_fwd.h>
27#include <vnet/l2/l2_fib.h>
Neale Ranns3b81a1e2018-09-06 09:50:26 -070028#include <vnet/l2/feat_bitmap.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070029
30#include <vppinfra/error.h>
31#include <vppinfra/hash.h>
32#include <vppinfra/sparse_vec.h>
33
34
/**
 * @file
 * @brief Ethernet Forwarding.
 *
 * Code in this file handles forwarding Layer 2 packets. This file calls
 * the FIB lookup, packet learning and the packet flooding as necessary.
 * Packet is then sent to the next graph node.
 */
43
Dave Barach97d8dc22016-08-15 15:31:15 -040044typedef struct
45{
Ed Warnickecb9cada2015-12-08 15:45:58 -070046
Dave Barach97d8dc22016-08-15 15:31:15 -040047 /* Hash table */
48 BVT (clib_bihash) * mac_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -070049
Dave Barach97d8dc22016-08-15 15:31:15 -040050 /* next node index for the L3 input node of each ethertype */
Ed Warnickecb9cada2015-12-08 15:45:58 -070051 next_by_ethertype_t l3_next;
52
John Lo9a719292018-04-05 14:52:07 -040053 /* Next nodes for each feature */
54 u32 feat_next_node_index[32];
55
Ed Warnickecb9cada2015-12-08 15:45:58 -070056 /* convenience variables */
Dave Barach97d8dc22016-08-15 15:31:15 -040057 vlib_main_t *vlib_main;
58 vnet_main_t *vnet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -070059} l2fwd_main_t;
60
Dave Barach97d8dc22016-08-15 15:31:15 -040061typedef struct
62{
63 /* per-pkt trace data */
Dave Barach60686012020-04-30 15:42:44 -040064 u8 dst_and_src[12];
Ed Warnickecb9cada2015-12-08 15:45:58 -070065 u32 sw_if_index;
66 u16 bd_index;
Neale Ranns7d645f72018-10-24 02:25:06 -070067 l2fib_entry_result_t result;
Ed Warnickecb9cada2015-12-08 15:45:58 -070068} l2fwd_trace_t;
69
70/* packet trace format function */
Dave Barach97d8dc22016-08-15 15:31:15 -040071static u8 *
72format_l2fwd_trace (u8 * s, va_list * args)
Ed Warnickecb9cada2015-12-08 15:45:58 -070073{
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Dave Barach97d8dc22016-08-15 15:31:15 -040076 l2fwd_trace_t *t = va_arg (*args, l2fwd_trace_t *);
77
Neale Ranns7d645f72018-10-24 02:25:06 -070078 s =
79 format (s,
80 "l2-fwd: sw_if_index %d dst %U src %U bd_index %d result [0x%llx, %d] %U",
Dave Barach60686012020-04-30 15:42:44 -040081 t->sw_if_index, format_ethernet_address, t->dst_and_src,
82 format_ethernet_address, t->dst_and_src + 6,
83 t->bd_index, t->result.raw,
Neale Ranns7d645f72018-10-24 02:25:06 -070084 t->result.fields.sw_if_index, format_l2fib_entry_result_flags,
85 t->result.fields.flags);
Ed Warnickecb9cada2015-12-08 15:45:58 -070086 return s;
87}
88
Neale Rannsc25eb452018-09-12 06:53:03 -040089#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -070090l2fwd_main_t l2fwd_main;
Neale Rannsc25eb452018-09-12 06:53:03 -040091#else
92extern l2fwd_main_t l2fwd_main;
93#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -070094
Damjan Mariond770cfc2019-09-02 19:00:33 +020095extern vlib_node_registration_t l2fwd_node;
Ed Warnickecb9cada2015-12-08 15:45:58 -070096
/* Error/counter table: _(symbol, human-readable description) */
#define foreach_l2fwd_error \
_(L2FWD, "L2 forward packets") \
_(FLOOD, "L2 forward misses") \
_(HIT, "L2 forward hits") \
_(BVI_BAD_MAC, "BVI L3 MAC mismatch") \
_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype") \
_(FILTER_DROP, "Filter Mac Drop") \
_(REFLECT_DROP, "Reflection Drop") \
_(STALE_DROP, "Stale entry Drop")
Ed Warnickecb9cada2015-12-08 15:45:58 -0700106
Dave Barach97d8dc22016-08-15 15:31:15 -0400107typedef enum
108{
Ed Warnickecb9cada2015-12-08 15:45:58 -0700109#define _(sym,str) L2FWD_ERROR_##sym,
110 foreach_l2fwd_error
111#undef _
Dave Barach97d8dc22016-08-15 15:31:15 -0400112 L2FWD_N_ERROR,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700113} l2fwd_error_t;
114
Dave Barach97d8dc22016-08-15 15:31:15 -0400115static char *l2fwd_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -0700116#define _(sym,string) string,
117 foreach_l2fwd_error
118#undef _
119};
120
Dave Barach97d8dc22016-08-15 15:31:15 -0400121typedef enum
122{
Ed Warnickecb9cada2015-12-08 15:45:58 -0700123 L2FWD_NEXT_L2_OUTPUT,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700124 L2FWD_NEXT_DROP,
125 L2FWD_N_NEXT,
126} l2fwd_next_t;
127
Chris Luke16bcf7d2016-09-01 14:31:46 -0400128/** Forward one packet based on the mac table lookup result. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700129
130static_always_inline void
131l2fwd_process (vlib_main_t * vm,
Dave Barach97d8dc22016-08-15 15:31:15 -0400132 vlib_node_runtime_t * node,
133 l2fwd_main_t * msm,
134 vlib_error_main_t * em,
135 vlib_buffer_t * b0,
Neale Rannsc25eb452018-09-12 06:53:03 -0400136 u32 sw_if_index0, l2fib_entry_result_t * result0, u16 * next0)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700137{
Eyal Bari0f360dc2017-06-14 13:11:20 +0300138 int try_flood = result0->raw == ~0;
139 int flood_error;
Dave Barach97d8dc22016-08-15 15:31:15 -0400140
Eyal Bari0f360dc2017-06-14 13:11:20 +0300141 if (PREDICT_FALSE (try_flood))
142 {
143 flood_error = L2FWD_ERROR_FLOOD;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700144 }
Dave Barach97d8dc22016-08-15 15:31:15 -0400145 else
146 {
Dave Barach97d8dc22016-08-15 15:31:15 -0400147 /* lookup hit, forward packet */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700148#ifdef COUNTERS
Dave Barach97d8dc22016-08-15 15:31:15 -0400149 em->counters[node_counter_base_index + L2FWD_ERROR_HIT] += 1;
150#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -0700151
Dave Barach97d8dc22016-08-15 15:31:15 -0400152 vnet_buffer (b0)->sw_if_index[VLIB_TX] = result0->fields.sw_if_index;
153 *next0 = L2FWD_NEXT_L2_OUTPUT;
Eyal Bari0f360dc2017-06-14 13:11:20 +0300154 int l2fib_seq_num_valid = 1;
John Lo8d00fff2017-08-03 00:35:36 -0400155
Eyal Bari0f360dc2017-06-14 13:11:20 +0300156 /* check l2fib seq num for stale entries */
Neale Rannsb54d0812018-09-06 06:22:56 -0700157 if (!l2fib_entry_result_is_set_AGE_NOT (result0))
Eyal Bari0f360dc2017-06-14 13:11:20 +0300158 {
Neale Ranns47a3d992020-09-29 15:38:51 +0000159 l2fib_seq_num_t in_sn = vnet_buffer (b0)->l2.l2fib_sn;
160 l2fib_seq_num_t expected_sn = l2_fib_update_seq_num (in_sn,
161 l2_input_seq_num
162 (result0->fields.sw_if_index));
163
164 l2fib_seq_num_valid = expected_sn == result0->fields.sn;
Eyal Bari0f360dc2017-06-14 13:11:20 +0300165 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700166
Eyal Bari0f360dc2017-06-14 13:11:20 +0300167 if (PREDICT_FALSE (!l2fib_seq_num_valid))
168 {
169 flood_error = L2FWD_ERROR_STALE_DROP;
170 try_flood = 1;
171 }
Dave Barach97d8dc22016-08-15 15:31:15 -0400172 /* perform reflection check */
Eyal Bari0f360dc2017-06-14 13:11:20 +0300173 else if (PREDICT_FALSE (sw_if_index0 == result0->fields.sw_if_index))
Dave Barach97d8dc22016-08-15 15:31:15 -0400174 {
175 b0->error = node->errors[L2FWD_ERROR_REFLECT_DROP];
176 *next0 = L2FWD_NEXT_DROP;
Dave Barach97d8dc22016-08-15 15:31:15 -0400177 }
Eyal Bari0f360dc2017-06-14 13:11:20 +0300178 /* perform filter check */
Neale Rannsb54d0812018-09-06 06:22:56 -0700179 else if (PREDICT_FALSE (l2fib_entry_result_is_set_FILTER (result0)))
Dave Barach97d8dc22016-08-15 15:31:15 -0400180 {
181 b0->error = node->errors[L2FWD_ERROR_FILTER_DROP];
182 *next0 = L2FWD_NEXT_DROP;
Dave Barach97d8dc22016-08-15 15:31:15 -0400183 }
Eyal Bari0f360dc2017-06-14 13:11:20 +0300184 /* perform BVI check */
Neale Rannsb54d0812018-09-06 06:22:56 -0700185 else if (PREDICT_FALSE (l2fib_entry_result_is_set_BVI (result0)))
Dave Barach97d8dc22016-08-15 15:31:15 -0400186 {
187 u32 rc;
188 rc = l2_to_bvi (vm,
189 msm->vnet_main,
190 b0,
191 vnet_buffer (b0)->sw_if_index[VLIB_TX],
192 &msm->l3_next, next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700193
Dave Barach97d8dc22016-08-15 15:31:15 -0400194 if (PREDICT_FALSE (rc))
195 {
196 if (rc == TO_BVI_ERR_BAD_MAC)
197 {
198 b0->error = node->errors[L2FWD_ERROR_BVI_BAD_MAC];
199 *next0 = L2FWD_NEXT_DROP;
200 }
201 else if (rc == TO_BVI_ERR_ETHERTYPE)
202 {
203 b0->error = node->errors[L2FWD_ERROR_BVI_ETHERTYPE];
204 *next0 = L2FWD_NEXT_DROP;
205 }
206 }
207 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700208 }
Eyal Bari0f360dc2017-06-14 13:11:20 +0300209
210 /* flood */
211 if (PREDICT_FALSE (try_flood))
212 {
213 /*
John Lo9a719292018-04-05 14:52:07 -0400214 * lookup miss, so flood which is typically the next feature
215 * unless some other feature is inserted before uu_flood
Eyal Bari0f360dc2017-06-14 13:11:20 +0300216 */
Neale Rannsb4743802018-09-05 09:13:57 -0700217 if (vnet_buffer (b0)->l2.feature_bitmap &
Neale Ranns3be91642020-09-14 07:41:48 +0000218 (L2INPUT_FEAT_UU_FLOOD | L2INPUT_FEAT_UU_FWD))
Eyal Bari0f360dc2017-06-14 13:11:20 +0300219 {
John Lo9a719292018-04-05 14:52:07 -0400220 *next0 = vnet_l2_feature_next (b0, msm->feat_next_node_index,
221 L2INPUT_FEAT_FWD);
Eyal Bari0f360dc2017-06-14 13:11:20 +0300222 }
223 else
224 {
225 /* Flooding is disabled */
226 b0->error = node->errors[flood_error];
227 *next0 = L2FWD_NEXT_DROP;
228 }
229 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700230}
231
232
Dave Barach681abe42017-02-15 09:01:01 -0500233static_always_inline uword
234l2fwd_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
235 vlib_frame_t * frame, int do_trace)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700236{
Neale Rannsc25eb452018-09-12 06:53:03 -0400237 u32 n_left, *from;
Dave Barach97d8dc22016-08-15 15:31:15 -0400238 l2fwd_main_t *msm = &l2fwd_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700239 vlib_node_t *n = vlib_get_node (vm, l2fwd_node.index);
Dave Barach97d8dc22016-08-15 15:31:15 -0400240 CLIB_UNUSED (u32 node_counter_base_index) = n->error_heap_index;
241 vlib_error_main_t *em = &vm->error_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700242 l2fib_entry_key_t cached_key;
243 l2fib_entry_result_t cached_result;
Neale Rannsc25eb452018-09-12 06:53:03 -0400244 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
245 u16 nexts[VLIB_FRAME_SIZE], *next;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246
Dave Barach97d8dc22016-08-15 15:31:15 -0400247 /* Clear the one-entry cache in case mac table was updated */
248 cached_key.raw = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700249 cached_result.raw = ~0;
250
251 from = vlib_frame_vector_args (frame);
Neale Rannsc25eb452018-09-12 06:53:03 -0400252 n_left = frame->n_vectors; /* number of packets to process */
253 vlib_get_buffers (vm, from, bufs, n_left);
254 next = nexts;
255 b = bufs;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700256
Neale Rannsc25eb452018-09-12 06:53:03 -0400257 while (n_left >= 8)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700258 {
Neale Rannsc25eb452018-09-12 06:53:03 -0400259 u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
260 const ethernet_header_t *h0, *h1, *h2, *h3;
261 l2fib_entry_key_t key0, key1, key2, key3;
262 l2fib_entry_result_t result0, result1, result2, result3;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700263
Neale Rannsc25eb452018-09-12 06:53:03 -0400264 /* Prefetch next iteration. */
265 {
266 vlib_prefetch_buffer_header (b[4], LOAD);
267 vlib_prefetch_buffer_header (b[5], LOAD);
268 vlib_prefetch_buffer_header (b[6], LOAD);
269 vlib_prefetch_buffer_header (b[7], LOAD);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700270
Damjan Marionaf7fb042021-07-15 11:54:41 +0200271 clib_prefetch_load (b[4]->data);
272 clib_prefetch_load (b[5]->data);
273 clib_prefetch_load (b[6]->data);
274 clib_prefetch_load (b[7]->data);
Neale Rannsc25eb452018-09-12 06:53:03 -0400275 }
276
277 /* RX interface handles */
278 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
279 sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
280 sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
281 sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
282
283 h0 = vlib_buffer_get_current (b[0]);
284 h1 = vlib_buffer_get_current (b[1]);
285 h2 = vlib_buffer_get_current (b[2]);
286 h3 = vlib_buffer_get_current (b[3]);
287
Neale Rannsc25eb452018-09-12 06:53:03 -0400288#ifdef COUNTERS
289 em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 4;
290#endif
291 /* *INDENT-OFF* */
292 l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
293 h0->dst_address, h1->dst_address,
294 h2->dst_address, h3->dst_address,
295 vnet_buffer (b[0])->l2.bd_index,
296 vnet_buffer (b[1])->l2.bd_index,
297 vnet_buffer (b[2])->l2.bd_index,
298 vnet_buffer (b[3])->l2.bd_index,
299 &key0, /* not used */
300 &key1, /* not used */
301 &key2, /* not used */
302 &key3, /* not used */
Neale Rannsc25eb452018-09-12 06:53:03 -0400303 &result0,
304 &result1,
305 &result2,
306 &result3);
307 /* *INDENT-ON* */
308 l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
309 l2fwd_process (vm, node, msm, em, b[1], sw_if_index1, &result1,
310 next + 1);
311 l2fwd_process (vm, node, msm, em, b[2], sw_if_index2, &result2,
312 next + 2);
313 l2fwd_process (vm, node, msm, em, b[3], sw_if_index3, &result3,
314 next + 3);
315
316 /* verify speculative enqueues, maybe switch current next frame */
317 /* if next0==next1==next_index then nothing special needs to be done */
Neale Ranns7d645f72018-10-24 02:25:06 -0700318 if (do_trace)
319 {
320 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
321 {
322 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
323 t->sw_if_index = sw_if_index0;
324 t->bd_index = vnet_buffer (b[0])->l2.bd_index;
Dave Barach60686012020-04-30 15:42:44 -0400325 clib_memcpy_fast (t->dst_and_src, h0->dst_address,
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000326 sizeof (h0->dst_address) +
327 sizeof (h0->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700328 t->result = result0;
329 }
330 if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
331 {
332 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[1], sizeof (*t));
333 t->sw_if_index = sw_if_index1;
334 t->bd_index = vnet_buffer (b[1])->l2.bd_index;
Dave Barach60686012020-04-30 15:42:44 -0400335 clib_memcpy_fast (t->dst_and_src, h1->dst_address,
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000336 sizeof (h1->dst_address) +
337 sizeof (h1->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700338 t->result = result1;
339 }
340 if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
341 {
342 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[2], sizeof (*t));
343 t->sw_if_index = sw_if_index2;
344 t->bd_index = vnet_buffer (b[2])->l2.bd_index;
Dave Barach60686012020-04-30 15:42:44 -0400345 clib_memcpy_fast (t->dst_and_src, h2->dst_address,
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000346 sizeof (h2->dst_address) +
347 sizeof (h2->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700348 t->result = result2;
349 }
350 if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
351 {
352 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[3], sizeof (*t));
353 t->sw_if_index = sw_if_index3;
354 t->bd_index = vnet_buffer (b[3])->l2.bd_index;
Dave Barach60686012020-04-30 15:42:44 -0400355 clib_memcpy_fast (t->dst_and_src, h3->dst_address,
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000356 sizeof (h3->dst_address) +
357 sizeof (h3->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700358 t->result = result3;
359 }
360 }
361
Neale Rannsc25eb452018-09-12 06:53:03 -0400362 next += 4;
363 b += 4;
364 n_left -= 4;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700365 }
366
Neale Rannsc25eb452018-09-12 06:53:03 -0400367 while (n_left > 0)
368 {
369 u32 sw_if_index0;
370 ethernet_header_t *h0;
371 l2fib_entry_key_t key0;
372 l2fib_entry_result_t result0;
Neale Rannsc25eb452018-09-12 06:53:03 -0400373
374 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
375
376 h0 = vlib_buffer_get_current (b[0]);
377
Neale Rannsc25eb452018-09-12 06:53:03 -0400378 /* process 1 pkt */
379#ifdef COUNTERS
380 em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 1;
381#endif
Eyal Bari11d47af2018-10-31 10:55:33 +0200382 l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
383 h0->dst_address, vnet_buffer (b[0])->l2.bd_index, &key0,
384 /* not used */ &result0);
Neale Rannsc25eb452018-09-12 06:53:03 -0400385 l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
386
Neale Ranns7d645f72018-10-24 02:25:06 -0700387 if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
388 {
389 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
390 t->sw_if_index = sw_if_index0;
391 t->bd_index = vnet_buffer (b[0])->l2.bd_index;
Dave Barach60686012020-04-30 15:42:44 -0400392 clib_memcpy_fast (t->dst_and_src, h0->dst_address,
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000393 sizeof (h0->dst_address) +
394 sizeof (h0->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700395 t->result = result0;
396 }
397
Neale Rannsc25eb452018-09-12 06:53:03 -0400398 /* verify speculative enqueue, maybe switch current next frame */
399 next += 1;
400 b += 1;
401 n_left -= 1;
402 }
403
404 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
405
Ed Warnickecb9cada2015-12-08 15:45:58 -0700406 return frame->n_vectors;
407}
408
Neale Rannsc25eb452018-09-12 06:53:03 -0400409VLIB_NODE_FN (l2fwd_node) (vlib_main_t * vm,
410 vlib_node_runtime_t * node, vlib_frame_t * frame)
Dave Barach681abe42017-02-15 09:01:01 -0500411{
412 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
413 return l2fwd_node_inline (vm, node, frame, 1 /* do_trace */ );
414 return l2fwd_node_inline (vm, node, frame, 0 /* do_trace */ );
415}
416
Dave Barach97d8dc22016-08-15 15:31:15 -0400417/* *INDENT-OFF* */
Damjan Mariond770cfc2019-09-02 19:00:33 +0200418VLIB_REGISTER_NODE (l2fwd_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -0700419 .name = "l2-fwd",
420 .vector_size = sizeof (u32),
421 .format_trace = format_l2fwd_trace,
422 .type = VLIB_NODE_TYPE_INTERNAL,
Dave Barach97d8dc22016-08-15 15:31:15 -0400423
Ed Warnickecb9cada2015-12-08 15:45:58 -0700424 .n_errors = ARRAY_LEN(l2fwd_error_strings),
425 .error_strings = l2fwd_error_strings,
426
427 .n_next_nodes = L2FWD_N_NEXT,
428
429 /* edit / add dispositions here */
430 .next_nodes = {
431 [L2FWD_NEXT_L2_OUTPUT] = "l2-output",
Ed Warnickecb9cada2015-12-08 15:45:58 -0700432 [L2FWD_NEXT_DROP] = "error-drop",
433 },
434};
Dave Barach97d8dc22016-08-15 15:31:15 -0400435/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700436
Neale Rannsc25eb452018-09-12 06:53:03 -0400437#ifndef CLIB_MARCH_VARIANT
438clib_error_t *
439l2fwd_init (vlib_main_t * vm)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700440{
Dave Barach97d8dc22016-08-15 15:31:15 -0400441 l2fwd_main_t *mp = &l2fwd_main;
442
Ed Warnickecb9cada2015-12-08 15:45:58 -0700443 mp->vlib_main = vm;
Dave Barach97d8dc22016-08-15 15:31:15 -0400444 mp->vnet_main = vnet_get_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -0700445
John Lo9a719292018-04-05 14:52:07 -0400446 /* Initialize the feature next-node indexes */
447 feat_bitmap_init_next_nodes (vm,
448 l2fwd_node.index,
449 L2INPUT_N_FEAT,
450 l2input_get_feat_names (),
451 mp->feat_next_node_index);
452
Ed Warnickecb9cada2015-12-08 15:45:58 -0700453 /* init the hash table ptr */
Dave Barach97d8dc22016-08-15 15:31:15 -0400454 mp->mac_table = get_mac_table ();
Ed Warnickecb9cada2015-12-08 15:45:58 -0700455
Dave Barach97d8dc22016-08-15 15:31:15 -0400456 /* Initialize the next nodes for each ethertype */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700457 next_by_ethertype_init (&mp->l3_next);
458
459 return 0;
460}
461
462VLIB_INIT_FUNCTION (l2fwd_init);
463
464
Chris Luke16bcf7d2016-09-01 14:31:46 -0400465/** Add the L3 input node for this ethertype to the next nodes structure. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700466void
467l2fwd_register_input_type (vlib_main_t * vm,
Dave Barach97d8dc22016-08-15 15:31:15 -0400468 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700469{
Dave Barach97d8dc22016-08-15 15:31:15 -0400470 l2fwd_main_t *mp = &l2fwd_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700471 u32 next_index;
472
Dave Barach97d8dc22016-08-15 15:31:15 -0400473 next_index = vlib_node_add_next (vm, l2fwd_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700474
475 next_by_ethertype_register (&mp->l3_next, type, next_index);
476}
477
478
Dave Barach97d8dc22016-08-15 15:31:15 -0400479/**
Chris Luke16bcf7d2016-09-01 14:31:46 -0400480 * Set subinterface forward enable/disable.
Dave Barach97d8dc22016-08-15 15:31:15 -0400481 * The CLI format is:
482 * set interface l2 forward <interface> [disable]
483 */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700484static clib_error_t *
Dave Barach97d8dc22016-08-15 15:31:15 -0400485int_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700486{
Dave Barach97d8dc22016-08-15 15:31:15 -0400487 vnet_main_t *vnm = vnet_get_main ();
488 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700489 u32 sw_if_index;
490 u32 enable;
491
Dave Barach97d8dc22016-08-15 15:31:15 -0400492 if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700493 {
494 error = clib_error_return (0, "unknown interface `%U'",
Dave Barach97d8dc22016-08-15 15:31:15 -0400495 format_unformat_error, input);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700496 goto done;
497 }
498
499 enable = 1;
Dave Barach97d8dc22016-08-15 15:31:15 -0400500 if (unformat (input, "disable"))
501 {
502 enable = 0;
503 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700504
Dave Barach97d8dc22016-08-15 15:31:15 -0400505 /* set the interface flag */
Neale Ranns47a3d992020-09-29 15:38:51 +0000506 if (l2input_intf_config (sw_if_index))
Dave Barach97d8dc22016-08-15 15:31:15 -0400507 {
508 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_XCONNECT, enable);
509 }
510 else
511 {
512 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FWD, enable);
513 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700514
Dave Barach97d8dc22016-08-15 15:31:15 -0400515done:
Ed Warnickecb9cada2015-12-08 15:45:58 -0700516 return error;
517}
518
Billy McFall22aa3e92016-09-09 08:46:40 -0400519/*?
520 * Layer 2 unicast forwarding can be enabled and disabled on each
521 * interface and on each bridge-domain. Use this command to
522 * manage interfaces. It is enabled by default.
523 *
524 * @cliexpar
Paul Vinciguerrabdc0e6b2018-09-22 05:32:50 -0700525 * Example of how to enable forwarding:
Billy McFall22aa3e92016-09-09 08:46:40 -0400526 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0}
Paul Vinciguerrabdc0e6b2018-09-22 05:32:50 -0700527 * Example of how to disable forwarding:
Billy McFall22aa3e92016-09-09 08:46:40 -0400528 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0 disable}
529?*/
Dave Barach97d8dc22016-08-15 15:31:15 -0400530/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700531VLIB_CLI_COMMAND (int_fwd_cli, static) = {
532 .path = "set interface l2 forward",
533 .short_help = "set interface l2 forward <interface> [disable]",
534 .function = int_fwd,
535};
Dave Barach97d8dc22016-08-15 15:31:15 -0400536/* *INDENT-ON* */
537
Neale Rannsc25eb452018-09-12 06:53:03 -0400538#endif
539
Dave Barach97d8dc22016-08-15 15:31:15 -0400540/*
541 * fd.io coding-style-patch-verification: ON
542 *
543 * Local Variables:
544 * eval: (c-set-style "gnu")
545 * End:
546 */