/*
 * l2_fwd.c : layer 2 forwarding using l2fib
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vlib/cli.h>

#include <vnet/l2/l2_input.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/l2/l2_fwd.h>
#include <vnet/l2/l2_fib.h>
#include <vnet/l2/feat_bitmap.h>

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/sparse_vec.h>

/**
 * @file
 * @brief Ethernet Forwarding.
 *
 * Code in this file handles forwarding Layer 2 packets. It performs the
 * L2 FIB lookup and invokes packet learning and packet flooding as needed.
 * The packet is then sent to the next graph node.
 */

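/* Per-node state: the shared MAC table, the per-feature and per-ethertype
 * next-node indices, and convenience pointers to the main structures. */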
typedef struct
{

  /* Hash table */
  BVT (clib_bihash) * mac_table;

  /* next node index for the L3 input node of each ethertype */
  next_by_ethertype_t l3_next;

  /* Next nodes for each feature */
  u32 feat_next_node_index[32];

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} l2fwd_main_t;

typedef struct
{
  /* per-pkt trace data */
  u8 dst_and_src[12];
  u32 sw_if_index;
  u16 bd_index;
  l2fib_entry_result_t result;
} l2fwd_trace_t;

/* packet trace format function */
static u8 *
format_l2fwd_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2fwd_trace_t *t = va_arg (*args, l2fwd_trace_t *);

  s =
    format (s,
            "l2-fwd: sw_if_index %d dst %U src %U bd_index %d result [0x%llx, %d] %U",
            t->sw_if_index, format_ethernet_address, t->dst_and_src,
            format_ethernet_address, t->dst_and_src + 6,
            t->bd_index, t->result.raw,
            t->result.fields.sw_if_index, format_l2fib_entry_result_flags,
            t->result.fields.flags);
  return s;
}

#ifndef CLIB_MARCH_VARIANT
l2fwd_main_t l2fwd_main;
#else
extern l2fwd_main_t l2fwd_main;
#endif

extern vlib_node_registration_t l2fwd_node;

#define foreach_l2fwd_error                              \
_(L2FWD,         "L2 forward packets")                   \
_(FLOOD,         "L2 forward misses")                    \
_(HIT,           "L2 forward hits")                      \
_(BVI_BAD_MAC,   "BVI L3 MAC mismatch")                  \
_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype")  \
_(FILTER_DROP,   "Filter Mac Drop")                      \
_(REFLECT_DROP,  "Reflection Drop")                      \
_(STALE_DROP,    "Stale entry Drop")

typedef enum
{
#define _(sym,str) L2FWD_ERROR_##sym,
  foreach_l2fwd_error
#undef _
  L2FWD_N_ERROR,
} l2fwd_error_t;

static char *l2fwd_error_strings[] = {
#define _(sym,string) string,
  foreach_l2fwd_error
#undef _
};

typedef enum
{
  L2FWD_NEXT_L2_OUTPUT,
  L2FWD_NEXT_DROP,
  L2FWD_N_NEXT,
} l2fwd_next_t;

/** Forward one packet based on the mac table lookup result. */

static_always_inline void
l2fwd_process (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               l2fwd_main_t * msm,
               vlib_error_main_t * em,
               vlib_buffer_t * b0,
               u32 sw_if_index0, l2fib_entry_result_t * result0, u16 * next0)
{
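  /* A raw lookup result of ~0 means the MAC table lookup missed; such
   * packets become flood candidates, handled at the end of this function. */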
  int try_flood = result0->raw == ~0;
  int flood_error;

  if (PREDICT_FALSE (try_flood))
    {
      flood_error = L2FWD_ERROR_FLOOD;
    }
  else
    {
      /* lookup hit, forward packet */
#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_HIT] += 1;
#endif

      vnet_buffer (b0)->sw_if_index[VLIB_TX] = result0->fields.sw_if_index;
      *next0 = L2FWD_NEXT_L2_OUTPUT;
      int l2fib_seq_num_valid = 1;

      /* check l2fib seq num for stale entries */
      if (!l2fib_entry_result_is_set_AGE_NOT (result0))
        {
          l2fib_seq_num_t in_sn = vnet_buffer (b0)->l2.l2fib_sn;
          l2fib_seq_num_t expected_sn = l2_fib_update_seq_num (in_sn,
                                                               l2_input_seq_num
                                                               (result0->fields.sw_if_index));

          l2fib_seq_num_valid = expected_sn == result0->fields.sn;
        }

      if (PREDICT_FALSE (!l2fib_seq_num_valid))
        {
          flood_error = L2FWD_ERROR_STALE_DROP;
          try_flood = 1;
        }
      /* perform reflection check */
      else if (PREDICT_FALSE (sw_if_index0 == result0->fields.sw_if_index))
        {
          b0->error = node->errors[L2FWD_ERROR_REFLECT_DROP];
          *next0 = L2FWD_NEXT_DROP;
        }
      /* perform filter check */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_FILTER (result0)))
        {
          b0->error = node->errors[L2FWD_ERROR_FILTER_DROP];
          *next0 = L2FWD_NEXT_DROP;
        }
      /* perform BVI check */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_BVI (result0)))
        {
          u32 rc;
          rc = l2_to_bvi (vm,
                          msm->vnet_main,
                          b0,
                          vnet_buffer (b0)->sw_if_index[VLIB_TX],
                          &msm->l3_next, next0);

          if (PREDICT_FALSE (rc))
            {
              if (rc == TO_BVI_ERR_BAD_MAC)
                {
                  b0->error = node->errors[L2FWD_ERROR_BVI_BAD_MAC];
                  *next0 = L2FWD_NEXT_DROP;
                }
              else if (rc == TO_BVI_ERR_ETHERTYPE)
                {
                  b0->error = node->errors[L2FWD_ERROR_BVI_ETHERTYPE];
                  *next0 = L2FWD_NEXT_DROP;
                }
            }
        }
    }

  /* flood */
  if (PREDICT_FALSE (try_flood))
    {
      /*
       * lookup miss, so flood which is typically the next feature
       * unless some other feature is inserted before uu_flood
       */
      if (vnet_buffer (b0)->l2.feature_bitmap &
          (L2INPUT_FEAT_UU_FLOOD |
           L2INPUT_FEAT_UU_FWD | L2INPUT_FEAT_GBP_FWD))
        {
          *next0 = vnet_l2_feature_next (b0, msm->feat_next_node_index,
                                         L2INPUT_FEAT_FWD);
        }
      else
        {
          /* Flooding is disabled */
          b0->error = node->errors[flood_error];
          *next0 = L2FWD_NEXT_DROP;
        }
    }
}


static_always_inline uword
l2fwd_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * frame, int do_trace)
{
  u32 n_left, *from;
  l2fwd_main_t *msm = &l2fwd_main;
  vlib_node_t *n = vlib_get_node (vm, l2fwd_node.index);
  CLIB_UNUSED (u32 node_counter_base_index) = n->error_heap_index;
  vlib_error_main_t *em = &vm->error_main;
  l2fib_entry_key_t cached_key;
  l2fib_entry_result_t cached_result;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  /* Clear the one-entry cache in case mac table was updated */
  cached_key.raw = ~0;
  cached_result.raw = ~0;

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;    /* number of packets to process */
  vlib_get_buffers (vm, from, bufs, n_left);
  next = nexts;
  b = bufs;

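  /* Quad loop: handle four packets per iteration while prefetching the
   * next four buffers. */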
  while (n_left >= 8)
    {
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      const ethernet_header_t *h0, *h1, *h2, *h3;
      l2fib_entry_key_t key0, key1, key2, key3;
      l2fib_entry_result_t result0, result1, result2, result3;

      /* Prefetch next iteration. */
      {
        vlib_prefetch_buffer_header (b[4], LOAD);
        vlib_prefetch_buffer_header (b[5], LOAD);
        vlib_prefetch_buffer_header (b[6], LOAD);
        vlib_prefetch_buffer_header (b[7], LOAD);

        CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
        CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
        CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
        CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
      }

      /* RX interface handles */
      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      h0 = vlib_buffer_get_current (b[0]);
      h1 = vlib_buffer_get_current (b[1]);
      h2 = vlib_buffer_get_current (b[2]);
      h3 = vlib_buffer_get_current (b[3]);

#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 4;
#endif
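      /* Look up all four destination MACs at once; the lookups share the
       * one-entry key/result cache cleared at the start of the frame. */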
      /* *INDENT-OFF* */
      l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
                      h0->dst_address, h1->dst_address,
                      h2->dst_address, h3->dst_address,
                      vnet_buffer (b[0])->l2.bd_index,
                      vnet_buffer (b[1])->l2.bd_index,
                      vnet_buffer (b[2])->l2.bd_index,
                      vnet_buffer (b[3])->l2.bd_index,
                      &key0,    /* not used */
                      &key1,    /* not used */
                      &key2,    /* not used */
                      &key3,    /* not used */
                      &result0,
                      &result1,
                      &result2,
                      &result3);
      /* *INDENT-ON* */
      l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
      l2fwd_process (vm, node, msm, em, b[1], sw_if_index1, &result1,
                     next + 1);
      l2fwd_process (vm, node, msm, em, b[2], sw_if_index2, &result2,
                     next + 2);
      l2fwd_process (vm, node, msm, em, b[3], sw_if_index3, &result3,
                     next + 3);

      /* verify speculative enqueues, maybe switch current next frame */
      /* if next0==next1==next_index then nothing special needs to be done */
      if (do_trace)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->bd_index = vnet_buffer (b[0])->l2.bd_index;
              clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                                sizeof (h0->dst_address) +
                                sizeof (h0->src_address));
              t->result = result0;
            }
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            {
              l2fwd_trace_t *t = vlib_add_trace (vm, node, b[1], sizeof (*t));
              t->sw_if_index = sw_if_index1;
              t->bd_index = vnet_buffer (b[1])->l2.bd_index;
              clib_memcpy_fast (t->dst_and_src, h1->dst_address,
                                sizeof (h1->dst_address) +
                                sizeof (h1->src_address));
              t->result = result1;
            }
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            {
              l2fwd_trace_t *t = vlib_add_trace (vm, node, b[2], sizeof (*t));
              t->sw_if_index = sw_if_index2;
              t->bd_index = vnet_buffer (b[2])->l2.bd_index;
              clib_memcpy_fast (t->dst_and_src, h2->dst_address,
                                sizeof (h2->dst_address) +
                                sizeof (h2->src_address));
              t->result = result2;
            }
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            {
              l2fwd_trace_t *t = vlib_add_trace (vm, node, b[3], sizeof (*t));
              t->sw_if_index = sw_if_index3;
              t->bd_index = vnet_buffer (b[3])->l2.bd_index;
              clib_memcpy_fast (t->dst_and_src, h3->dst_address,
                                sizeof (h3->dst_address) +
                                sizeof (h3->src_address));
              t->result = result3;
            }
        }

      next += 4;
      b += 4;
      n_left -= 4;
    }

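  /* Single loop: process any remaining packets one at a time. */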
  while (n_left > 0)
    {
      u32 sw_if_index0;
      ethernet_header_t *h0;
      l2fib_entry_key_t key0;
      l2fib_entry_result_t result0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];

      h0 = vlib_buffer_get_current (b[0]);

      /* process 1 pkt */
#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 1;
#endif
      l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
                      h0->dst_address, vnet_buffer (b[0])->l2.bd_index, &key0,
                      /* not used */ &result0);
      l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);

      if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
          t->sw_if_index = sw_if_index0;
          t->bd_index = vnet_buffer (b[0])->l2.bd_index;
          clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                            sizeof (h0->dst_address) +
                            sizeof (h0->src_address));
          t->result = result0;
        }

      /* verify speculative enqueue, maybe switch current next frame */
      next += 1;
      b += 1;
      n_left -= 1;
    }

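  /* Hand all buffers to their selected next nodes in a single operation. */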
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

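/* Node dispatch function: use the tracing variant of the inline only when
 * packet tracing is enabled on the node. */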
VLIB_NODE_FN (l2fwd_node) (vlib_main_t * vm,
                           vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    return l2fwd_node_inline (vm, node, frame, 1 /* do_trace */ );
  return l2fwd_node_inline (vm, node, frame, 0 /* do_trace */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2fwd_node) = {
  .name = "l2-fwd",
  .vector_size = sizeof (u32),
  .format_trace = format_l2fwd_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2fwd_error_strings),
  .error_strings = l2fwd_error_strings,

  .n_next_nodes = L2FWD_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [L2FWD_NEXT_L2_OUTPUT] = "l2-output",
    [L2FWD_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT
clib_error_t *
l2fwd_init (vlib_main_t * vm)
{
  l2fwd_main_t *mp = &l2fwd_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main ();

  /* Initialize the feature next-node indexes */
  feat_bitmap_init_next_nodes (vm,
                               l2fwd_node.index,
                               L2INPUT_N_FEAT,
                               l2input_get_feat_names (),
                               mp->feat_next_node_index);

  /* init the hash table ptr */
  mp->mac_table = get_mac_table ();

  /* Initialize the next nodes for each ethertype */
  next_by_ethertype_init (&mp->l3_next);

  return 0;
}

VLIB_INIT_FUNCTION (l2fwd_init);


/** Add the L3 input node for this ethertype to the next nodes structure. */
void
l2fwd_register_input_type (vlib_main_t * vm,
                           ethernet_type_t type, u32 node_index)
{
  l2fwd_main_t *mp = &l2fwd_main;
  u32 next_index;

  next_index = vlib_node_add_next (vm, l2fwd_node.index, node_index);

  next_by_ethertype_register (&mp->l3_next, type, next_index);
}


/**
 * Set subinterface forward enable/disable.
 * The CLI format is:
 *    set interface l2 forward <interface> [disable]
 */
static clib_error_t *
int_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  clib_error_t *error = 0;
  u32 sw_if_index;
  u32 enable;

  if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
    {
      error = clib_error_return (0, "unknown interface `%U'",
                                 format_unformat_error, input);
      goto done;
    }

  enable = 1;
  if (unformat (input, "disable"))
    {
      enable = 0;
    }

  /* set the interface flag */
  if (l2input_intf_config (sw_if_index))
    {
      l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_XCONNECT, enable);
    }
  else
    {
      l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FWD, enable);
    }

done:
  return error;
}

/*?
 * Layer 2 unicast forwarding can be enabled and disabled on each
 * interface and on each bridge-domain. Use this command to
 * manage interfaces. It is enabled by default.
 *
 * @cliexpar
 * Example of how to enable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0}
 * Example of how to disable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0 disable}
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (int_fwd_cli, static) = {
  .path = "set interface l2 forward",
  .short_help = "set interface l2 forward <interface> [disable]",
  .function = int_fwd,
};
/* *INDENT-ON* */

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */