blob: f440778417f2847670178d1a18f83c277dcf47db [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * l2_fwd.c : layer 2 forwarding using l2fib
3 *
4 * Copyright (c) 2013 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vlib/vlib.h>
19#include <vnet/vnet.h>
20#include <vnet/pg/pg.h>
21#include <vnet/ethernet/ethernet.h>
22#include <vlib/cli.h>
23
24#include <vnet/l2/l2_input.h>
25#include <vnet/l2/l2_bvi.h>
26#include <vnet/l2/l2_fwd.h>
27#include <vnet/l2/l2_fib.h>
Neale Ranns3b81a1e2018-09-06 09:50:26 -070028#include <vnet/l2/feat_bitmap.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070029
30#include <vppinfra/error.h>
31#include <vppinfra/hash.h>
32#include <vppinfra/sparse_vec.h>
33
34
Billy McFall22aa3e92016-09-09 08:46:40 -040035/**
36 * @file
37 * @brief Ethernet Forwarding.
38 *
39 * Code in this file handles forwarding Layer 2 packets. This file calls
40 * the FIB lookup, packet learning and the packet flooding as necessary.
41 * Packet is then sent to the next graph node.
42 */
43
/**
 * Per-process state for the l2-fwd graph node.
 * A single instance (l2fwd_main) is shared by all worker threads;
 * it is populated once in l2fwd_init().
 */
typedef struct
{

  /* Hash table: pointer to the global L2 FIB MAC table (see get_mac_table) */
  BVT (clib_bihash) * mac_table;

  /* next node index for the L3 input node of each ethertype */
  next_by_ethertype_t l3_next;

  /* Next nodes for each feature; indexed by L2 input feature bit,
     filled in by feat_bitmap_init_next_nodes() */
  u32 feat_next_node_index[32];

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} l2fwd_main_t;
60
/**
 * Per-packet trace record captured by the l2-fwd node and rendered
 * by format_l2fwd_trace(). dst/src are kept adjacent so both MACs can
 * be copied from the ethernet header with a single memcpy.
 */
typedef struct
{
  /* per-pkt trace data */
  u8 dst[6];			/* destination MAC from the packet */
  u8 src[6];			/* source MAC from the packet */
  u32 sw_if_index;		/* RX interface */
  u16 bd_index;			/* bridge-domain index */
  l2fib_entry_result_t result;	/* raw L2 FIB lookup result */
} l2fwd_trace_t;
70
71/* packet trace format function */
Dave Barach97d8dc22016-08-15 15:31:15 -040072static u8 *
73format_l2fwd_trace (u8 * s, va_list * args)
Ed Warnickecb9cada2015-12-08 15:45:58 -070074{
75 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
76 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Dave Barach97d8dc22016-08-15 15:31:15 -040077 l2fwd_trace_t *t = va_arg (*args, l2fwd_trace_t *);
78
Neale Ranns7d645f72018-10-24 02:25:06 -070079 s =
80 format (s,
81 "l2-fwd: sw_if_index %d dst %U src %U bd_index %d result [0x%llx, %d] %U",
82 t->sw_if_index, format_ethernet_address, t->dst,
83 format_ethernet_address, t->src, t->bd_index, t->result.raw,
84 t->result.fields.sw_if_index, format_l2fib_entry_result_flags,
85 t->result.fields.flags);
Ed Warnickecb9cada2015-12-08 15:45:58 -070086 return s;
87}
88
/* The single global instance lives in the default (non-march-variant)
   translation unit; march variants only reference it. */
#ifndef CLIB_MARCH_VARIANT
l2fwd_main_t l2fwd_main;
#else
extern l2fwd_main_t l2fwd_main;
#endif

extern vlib_node_registration_t l2fwd_node;

/* X-macro list of (error symbol, counter description) pairs; expanded
   below into both the l2fwd_error_t enum and its string table. */
#define foreach_l2fwd_error \
_(L2FWD, "L2 forward packets") \
_(FLOOD, "L2 forward misses") \
_(HIT, "L2 forward hits") \
_(BVI_BAD_MAC, "BVI L3 MAC mismatch") \
_(BVI_ETHERTYPE, "BVI packet with unhandled ethertype") \
_(FILTER_DROP, "Filter Mac Drop") \
_(REFLECT_DROP, "Reflection Drop") \
_(STALE_DROP, "Stale entry Drop")

/* Error/counter indices for the l2-fwd node. */
typedef enum
{
#define _(sym,str) L2FWD_ERROR_##sym,
  foreach_l2fwd_error
#undef _
  L2FWD_N_ERROR,
} l2fwd_error_t;

/* Human-readable strings for the counters above, same order as the enum. */
static char *l2fwd_error_strings[] = {
#define _(sym,string) string,
  foreach_l2fwd_error
#undef _
};

/* Next-node dispositions; must match the .next_nodes table in the
   node registration below. */
typedef enum
{
  L2FWD_NEXT_L2_OUTPUT,
  L2FWD_NEXT_DROP,
  L2FWD_N_NEXT,
} l2fwd_next_t;
127
/** Forward one packet based on the mac table lookup result.
 *
 * Decides the next-node disposition (*next0) for buffer b0 given the
 * L2 FIB lookup result. On a valid hit the packet is sent to l2-output
 * with TX sw_if_index set from the FIB entry, after three drop checks
 * (stale entry, reflection back out the RX interface, filter entry)
 * and an optional redirect into the BVI path. On a miss — or a stale
 * hit — the packet is handed to the flood/uu-fwd feature if enabled,
 * otherwise dropped with the recorded flood_error.
 */

static_always_inline void
l2fwd_process (vlib_main_t * vm,
	       vlib_node_runtime_t * node,
	       l2fwd_main_t * msm,
	       vlib_error_main_t * em,
	       vlib_buffer_t * b0,
	       u32 sw_if_index0, l2fib_entry_result_t * result0, u16 * next0)
{
  /* all-ones raw result is the l2fib_lookup miss sentinel */
  int try_flood = result0->raw == ~0;
  int flood_error;

  if (PREDICT_FALSE (try_flood))
    {
      flood_error = L2FWD_ERROR_FLOOD;
    }
  else
    {
      /* lookup hit, forward packet */
#ifdef COUNTERS
      em->counters[node_counter_base_index + L2FWD_ERROR_HIT] += 1;
#endif

      vnet_buffer (b0)->sw_if_index[VLIB_TX] = result0->fields.sw_if_index;
      *next0 = L2FWD_NEXT_L2_OUTPUT;
      int l2fib_seq_num_valid = 1;

      /* check l2fib seq num for stale entries; entries marked AGE_NOT
         never go stale and skip the comparison */
      if (!l2fib_entry_result_is_set_AGE_NOT (result0))
	{
	  l2fib_seq_num_t in_sn = {.as_u16 = vnet_buffer (b0)->l2.l2fib_sn };
	  /* expected seq num = packet's bridge-domain generation combined
	     with the current per-interface generation counter */
	  l2fib_seq_num_t expected_sn = {
	    .bd = in_sn.bd,
	    .swif = *l2fib_swif_seq_num (result0->fields.sw_if_index),
	  };
	  l2fib_seq_num_valid =
	    expected_sn.as_u16 == result0->fields.sn.as_u16;
	}

      if (PREDICT_FALSE (!l2fib_seq_num_valid))
	{
	  /* stale entry: fall through to the flood path below, but
	     record the more specific drop reason in case flooding is off */
	  flood_error = L2FWD_ERROR_STALE_DROP;
	  try_flood = 1;
	}
      /* perform reflection check: never forward back out the RX interface */
      else if (PREDICT_FALSE (sw_if_index0 == result0->fields.sw_if_index))
	{
	  b0->error = node->errors[L2FWD_ERROR_REFLECT_DROP];
	  *next0 = L2FWD_NEXT_DROP;
	}
      /* perform filter check */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_FILTER (result0)))
	{
	  b0->error = node->errors[L2FWD_ERROR_FILTER_DROP];
	  *next0 = L2FWD_NEXT_DROP;
	}
      /* perform BVI check: hand the packet to the L3 path via l2_to_bvi,
         which also picks the final *next0 on success */
      else if (PREDICT_FALSE (l2fib_entry_result_is_set_BVI (result0)))
	{
	  u32 rc;
	  rc = l2_to_bvi (vm,
			  msm->vnet_main,
			  b0,
			  vnet_buffer (b0)->sw_if_index[VLIB_TX],
			  &msm->l3_next, next0);

	  if (PREDICT_FALSE (rc))
	    {
	      if (rc == TO_BVI_ERR_BAD_MAC)
		{
		  b0->error = node->errors[L2FWD_ERROR_BVI_BAD_MAC];
		  *next0 = L2FWD_NEXT_DROP;
		}
	      else if (rc == TO_BVI_ERR_ETHERTYPE)
		{
		  b0->error = node->errors[L2FWD_ERROR_BVI_ETHERTYPE];
		  *next0 = L2FWD_NEXT_DROP;
		}
	    }
	}
    }

  /* flood */
  if (PREDICT_FALSE (try_flood))
    {
      /*
       * lookup miss, so flood which is typically the next feature
       * unless some other feature is inserted before uu_flood
       */
      if (vnet_buffer (b0)->l2.feature_bitmap &
	  (L2INPUT_FEAT_UU_FLOOD |
	   L2INPUT_FEAT_UU_FWD | L2INPUT_FEAT_GBP_FWD))
	{
	  *next0 = vnet_l2_feature_next (b0, msm->feat_next_node_index,
					 L2INPUT_FEAT_FWD);
	}
      else
	{
	  /* Flooding is disabled */
	  b0->error = node->errors[flood_error];
	  *next0 = L2FWD_NEXT_DROP;
	}
    }
}
233
234
Dave Barach681abe42017-02-15 09:01:01 -0500235static_always_inline uword
236l2fwd_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
237 vlib_frame_t * frame, int do_trace)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700238{
Neale Rannsc25eb452018-09-12 06:53:03 -0400239 u32 n_left, *from;
Dave Barach97d8dc22016-08-15 15:31:15 -0400240 l2fwd_main_t *msm = &l2fwd_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700241 vlib_node_t *n = vlib_get_node (vm, l2fwd_node.index);
Dave Barach97d8dc22016-08-15 15:31:15 -0400242 CLIB_UNUSED (u32 node_counter_base_index) = n->error_heap_index;
243 vlib_error_main_t *em = &vm->error_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700244 l2fib_entry_key_t cached_key;
245 l2fib_entry_result_t cached_result;
Neale Rannsc25eb452018-09-12 06:53:03 -0400246 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
247 u16 nexts[VLIB_FRAME_SIZE], *next;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700248
Dave Barach97d8dc22016-08-15 15:31:15 -0400249 /* Clear the one-entry cache in case mac table was updated */
250 cached_key.raw = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251 cached_result.raw = ~0;
252
253 from = vlib_frame_vector_args (frame);
Neale Rannsc25eb452018-09-12 06:53:03 -0400254 n_left = frame->n_vectors; /* number of packets to process */
255 vlib_get_buffers (vm, from, bufs, n_left);
256 next = nexts;
257 b = bufs;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700258
Neale Rannsc25eb452018-09-12 06:53:03 -0400259 while (n_left >= 8)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700260 {
Neale Rannsc25eb452018-09-12 06:53:03 -0400261 u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
262 const ethernet_header_t *h0, *h1, *h2, *h3;
263 l2fib_entry_key_t key0, key1, key2, key3;
264 l2fib_entry_result_t result0, result1, result2, result3;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700265
Neale Rannsc25eb452018-09-12 06:53:03 -0400266 /* Prefetch next iteration. */
267 {
268 vlib_prefetch_buffer_header (b[4], LOAD);
269 vlib_prefetch_buffer_header (b[5], LOAD);
270 vlib_prefetch_buffer_header (b[6], LOAD);
271 vlib_prefetch_buffer_header (b[7], LOAD);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700272
Neale Rannsc25eb452018-09-12 06:53:03 -0400273 CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
274 CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
275 CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
276 CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
277 }
278
279 /* RX interface handles */
280 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
281 sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
282 sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
283 sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
284
285 h0 = vlib_buffer_get_current (b[0]);
286 h1 = vlib_buffer_get_current (b[1]);
287 h2 = vlib_buffer_get_current (b[2]);
288 h3 = vlib_buffer_get_current (b[3]);
289
Neale Rannsc25eb452018-09-12 06:53:03 -0400290#ifdef COUNTERS
291 em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 4;
292#endif
293 /* *INDENT-OFF* */
294 l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
295 h0->dst_address, h1->dst_address,
296 h2->dst_address, h3->dst_address,
297 vnet_buffer (b[0])->l2.bd_index,
298 vnet_buffer (b[1])->l2.bd_index,
299 vnet_buffer (b[2])->l2.bd_index,
300 vnet_buffer (b[3])->l2.bd_index,
301 &key0, /* not used */
302 &key1, /* not used */
303 &key2, /* not used */
304 &key3, /* not used */
Neale Rannsc25eb452018-09-12 06:53:03 -0400305 &result0,
306 &result1,
307 &result2,
308 &result3);
309 /* *INDENT-ON* */
310 l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
311 l2fwd_process (vm, node, msm, em, b[1], sw_if_index1, &result1,
312 next + 1);
313 l2fwd_process (vm, node, msm, em, b[2], sw_if_index2, &result2,
314 next + 2);
315 l2fwd_process (vm, node, msm, em, b[3], sw_if_index3, &result3,
316 next + 3);
317
318 /* verify speculative enqueues, maybe switch current next frame */
319 /* if next0==next1==next_index then nothing special needs to be done */
Neale Ranns7d645f72018-10-24 02:25:06 -0700320 if (do_trace)
321 {
322 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
323 {
324 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
325 t->sw_if_index = sw_if_index0;
326 t->bd_index = vnet_buffer (b[0])->l2.bd_index;
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000327 clib_memcpy_fast (t->dst, h0->dst_address,
328 sizeof (h0->dst_address) +
329 sizeof (h0->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700330 t->result = result0;
331 }
332 if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
333 {
334 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[1], sizeof (*t));
335 t->sw_if_index = sw_if_index1;
336 t->bd_index = vnet_buffer (b[1])->l2.bd_index;
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000337 clib_memcpy_fast (t->dst, h1->dst_address,
338 sizeof (h1->dst_address) +
339 sizeof (h1->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700340 t->result = result1;
341 }
342 if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
343 {
344 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[2], sizeof (*t));
345 t->sw_if_index = sw_if_index2;
346 t->bd_index = vnet_buffer (b[2])->l2.bd_index;
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000347 clib_memcpy_fast (t->dst, h2->dst_address,
348 sizeof (h2->dst_address) +
349 sizeof (h2->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700350 t->result = result2;
351 }
352 if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
353 {
354 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[3], sizeof (*t));
355 t->sw_if_index = sw_if_index3;
356 t->bd_index = vnet_buffer (b[3])->l2.bd_index;
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000357 clib_memcpy_fast (t->dst, h3->dst_address,
358 sizeof (h3->dst_address) +
359 sizeof (h3->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700360 t->result = result3;
361 }
362 }
363
Neale Rannsc25eb452018-09-12 06:53:03 -0400364 next += 4;
365 b += 4;
366 n_left -= 4;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700367 }
368
Neale Rannsc25eb452018-09-12 06:53:03 -0400369 while (n_left > 0)
370 {
371 u32 sw_if_index0;
372 ethernet_header_t *h0;
373 l2fib_entry_key_t key0;
374 l2fib_entry_result_t result0;
Neale Rannsc25eb452018-09-12 06:53:03 -0400375
376 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
377
378 h0 = vlib_buffer_get_current (b[0]);
379
Neale Rannsc25eb452018-09-12 06:53:03 -0400380 /* process 1 pkt */
381#ifdef COUNTERS
382 em->counters[node_counter_base_index + L2FWD_ERROR_L2FWD] += 1;
383#endif
Eyal Bari11d47af2018-10-31 10:55:33 +0200384 l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
385 h0->dst_address, vnet_buffer (b[0])->l2.bd_index, &key0,
386 /* not used */ &result0);
Neale Rannsc25eb452018-09-12 06:53:03 -0400387 l2fwd_process (vm, node, msm, em, b[0], sw_if_index0, &result0, next);
388
Neale Ranns7d645f72018-10-24 02:25:06 -0700389 if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
390 {
391 l2fwd_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
392 t->sw_if_index = sw_if_index0;
393 t->bd_index = vnet_buffer (b[0])->l2.bd_index;
Zhiyong Yangba6deb92020-04-23 15:21:30 +0000394 clib_memcpy_fast (t->dst, h0->dst_address,
395 sizeof (h0->dst_address) +
396 sizeof (h0->src_address));
Neale Ranns7d645f72018-10-24 02:25:06 -0700397 t->result = result0;
398 }
399
Neale Rannsc25eb452018-09-12 06:53:03 -0400400 /* verify speculative enqueue, maybe switch current next frame */
401 next += 1;
402 b += 1;
403 n_left -= 1;
404 }
405
406 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
407
Ed Warnickecb9cada2015-12-08 15:45:58 -0700408 return frame->n_vectors;
409}
410
Neale Rannsc25eb452018-09-12 06:53:03 -0400411VLIB_NODE_FN (l2fwd_node) (vlib_main_t * vm,
412 vlib_node_runtime_t * node, vlib_frame_t * frame)
Dave Barach681abe42017-02-15 09:01:01 -0500413{
414 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
415 return l2fwd_node_inline (vm, node, frame, 1 /* do_trace */ );
416 return l2fwd_node_inline (vm, node, frame, 0 /* do_trace */ );
417}
418
/* Graph-node registration for l2-fwd. Error strings and next-node
   dispositions must stay in sync with l2fwd_error_t / l2fwd_next_t. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2fwd_node) = {
  .name = "l2-fwd",
  .vector_size = sizeof (u32),
  .format_trace = format_l2fwd_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2fwd_error_strings),
  .error_strings = l2fwd_error_strings,

  .n_next_nodes = L2FWD_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
	[L2FWD_NEXT_L2_OUTPUT] = "l2-output",
	[L2FWD_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700438
Neale Rannsc25eb452018-09-12 06:53:03 -0400439#ifndef CLIB_MARCH_VARIANT
/**
 * One-time initialization for the l2-fwd node: record the vlib/vnet
 * main pointers, resolve the per-feature next-node indexes, grab the
 * shared MAC table pointer, and set up the per-ethertype L3 next nodes.
 *
 * @return 0 on success (no failure paths here).
 */
clib_error_t *
l2fwd_init (vlib_main_t * vm)
{
  l2fwd_main_t *mp = &l2fwd_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main ();

  /* Initialize the feature next-node indexes */
  feat_bitmap_init_next_nodes (vm,
			       l2fwd_node.index,
			       L2INPUT_N_FEAT,
			       l2input_get_feat_names (),
			       mp->feat_next_node_index);

  /* init the hash table ptr */
  mp->mac_table = get_mac_table ();

  /* Initialize the next nodes for each ethertype */
  next_by_ethertype_init (&mp->l3_next);

  return 0;
}

VLIB_INIT_FUNCTION (l2fwd_init);
465
466
Chris Luke16bcf7d2016-09-01 14:31:46 -0400467/** Add the L3 input node for this ethertype to the next nodes structure. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700468void
469l2fwd_register_input_type (vlib_main_t * vm,
Dave Barach97d8dc22016-08-15 15:31:15 -0400470 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700471{
Dave Barach97d8dc22016-08-15 15:31:15 -0400472 l2fwd_main_t *mp = &l2fwd_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700473 u32 next_index;
474
Dave Barach97d8dc22016-08-15 15:31:15 -0400475 next_index = vlib_node_add_next (vm, l2fwd_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700476
477 next_by_ethertype_register (&mp->l3_next, type, next_index);
478}
479
480
Dave Barach97d8dc22016-08-15 15:31:15 -0400481/**
Chris Luke16bcf7d2016-09-01 14:31:46 -0400482 * Set subinterface forward enable/disable.
Dave Barach97d8dc22016-08-15 15:31:15 -0400483 * The CLI format is:
484 * set interface l2 forward <interface> [disable]
485 */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700486static clib_error_t *
Dave Barach97d8dc22016-08-15 15:31:15 -0400487int_fwd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700488{
Dave Barach97d8dc22016-08-15 15:31:15 -0400489 vnet_main_t *vnm = vnet_get_main ();
490 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700491 u32 sw_if_index;
492 u32 enable;
493
Dave Barach97d8dc22016-08-15 15:31:15 -0400494 if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700495 {
496 error = clib_error_return (0, "unknown interface `%U'",
Dave Barach97d8dc22016-08-15 15:31:15 -0400497 format_unformat_error, input);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700498 goto done;
499 }
500
501 enable = 1;
Dave Barach97d8dc22016-08-15 15:31:15 -0400502 if (unformat (input, "disable"))
503 {
504 enable = 0;
505 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700506
Dave Barach97d8dc22016-08-15 15:31:15 -0400507 /* set the interface flag */
508 if (l2input_intf_config (sw_if_index)->xconnect)
509 {
510 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_XCONNECT, enable);
511 }
512 else
513 {
514 l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_FWD, enable);
515 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700516
Dave Barach97d8dc22016-08-15 15:31:15 -0400517done:
Ed Warnickecb9cada2015-12-08 15:45:58 -0700518 return error;
519}
520
/*?
 * Layer 2 unicast forwarding can be enabled and disabled on each
 * interface and on each bridge-domain. Use this command to
 * manage interfaces. It is enabled by default.
 *
 * @cliexpar
 * Example of how to enable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0}
 * Example of how to disable forwarding:
 * @cliexcmd{set interface l2 forward GigabitEthernet0/8/0 disable}
?*/
/* CLI registration; dispatches to int_fwd() above. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (int_fwd_cli, static) = {
  .path = "set interface l2 forward",
  .short_help = "set interface l2 forward <interface> [disable]",
  .function = int_fwd,
};
/* *INDENT-ON* */
Dave Barach97d8dc22016-08-15 15:31:15 -0400538/* *INDENT-ON* */
539
Neale Rannsc25eb452018-09-12 06:53:03 -0400540#endif
541
Dave Barach97d8dc22016-08-15 15:31:15 -0400542/*
543 * fd.io coding-style-patch-verification: ON
544 *
545 * Local Variables:
546 * eval: (c-set-style "gnu")
547 * End:
548 */