/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ethernet_node.c: ethernet packet processing
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/p2p_ethernet.h>
#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/classify/pcap_classify.h>

#define foreach_ethernet_input_next \
  _ (PUNT, "error-punt") \
  _ (DROP, "error-drop") \
  _ (LLC, "llc-input") \
  _ (IP4_INPUT, "ip4-input") \
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
  ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;

typedef struct
{
  u8 packet_data[32];
  u16 frame_flags;
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;

static u8 *
format_ethernet_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
  u32 indent = format_get_indent (s);

  if (t->frame_flags)
    {
      s = format (s, "frame: flags 0x%x", t->frame_flags);
      if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
        s = format (s, ", hw-if-index %u, sw-if-index %u",
                    t->frame_data.hw_if_index, t->frame_data.sw_if_index);
      s = format (s, "\n%U", format_white_space, indent);
    }
  s = format (s, "%U", format_ethernet_header, t->packet_data);

  return s;
}

extern vlib_node_registration_t ethernet_input_node;

typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;

// Parse the ethernet header to extract vlan tags and innermost ethertype
static_always_inline void
parse_header (ethernet_input_variant_t variant,
              vlib_buffer_t * b0,
              u16 * type,
              u16 * orig_type,
              u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = vlib_buffer_get_current (b0);

      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing
      u16 *e0;

      e0 = vlib_buffer_get_current (b0);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = vlib_buffer_get_current (b0);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      if (0 == *outer_id)
        *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
        {
          // Double tagged packet
          *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

          h0 = vlib_buffer_get_current (b0);

          tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

          *inner_id = tag & 0xfff;

          *type = clib_net_to_host_u16 (h0->type);

          vlib_buffer_advance (b0, sizeof (h0[0]));
          vlan_count = 2;
          if (*type == ETHERNET_TYPE_VLAN)
            {
              // More than double tagged packet
              *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

              vlib_buffer_advance (b0, sizeof (h0[0]));
              vlan_count = 3;   // "unknown" number, aka, 3-or-more
            }
        }
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}

static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
                                  u64 * dmacs, u8 * dmacs_bad,
                                  u32 n_packets, ethernet_interface_t * ei,
                                  u8 have_sec_dmac);

// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
identify_subint (ethernet_main_t * em,
                 vnet_hw_interface_t * hi,
                 vlib_buffer_t * b0,
                 u32 match_flags,
                 main_intf_t * main_intf,
                 vlan_intf_t * vlan_intf,
                 qinq_intf_t * qinq_intf,
                 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
  u32 matched;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
                                 qinq_intf, new_sw_if_index, error0, is_l2);

  if (matched)
    {
      // Perform L3 my-mac filter
      // A unicast packet arriving on an L3 interface must have a dmac
      // matching the interface mac. If interface has STATUS_L3 bit set
      // mac filter is already done.
      if ((!*is_l2) && ei &&
          (!(ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
        {
          u64 dmacs[2];
          u8 dmacs_bad[2];
          ethernet_header_t *e0;

          e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
          dmacs[0] = *(u64 *) e0;

          if (vec_len (ei->secondary_addrs))
            ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
                                              1 /* n_packets */ , ei,
                                              1 /* have_sec_dmac */ );
          else
            ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
                                              1 /* n_packets */ , ei,
                                              0 /* have_sec_dmac */ );
          if (dmacs_bad[0])
            *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
        }

      // Check for down subinterface
      *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
    }
}
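
/*
 * Note on identify_subint() above: the my-mac filter only runs when the
 * matched (sub)interface is L3 and the hardware has not already filtered
 * on MAC (ETHERNET_INTERFACE_FLAG_STATUS_L3 clear). The frame's DMAC must
 * match the interface's primary address or one of its secondary
 * addresses; otherwise *error0 becomes ETHERNET_ERROR_L3_MAC_MISMATCH and
 * determine_next_node() below will steer the packet to error-drop.
 */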

static_always_inline void
determine_next_node (ethernet_main_t * em,
                     ethernet_input_variant_t variant,
                     u32 is_l20,
                     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
              ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;
    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
        i0 ==
        SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // If variant is variant_ethernet then we came from LLC processing. Don't
      // go back there; drop instead by keeping the drop/bad table result.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
        {
          *next0 = ETHERNET_INPUT_NEXT_LLC;
        }
    }
}

/* following vector code relies on following assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
               STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
               "l3_hdr_offset must follow l2_hdr_offset");

static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  u64x4 r, radv;

  /* vector of signed 16 bit integers used in signed vector add operation
     to advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}
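
/*
 * Note on the CLIB_HAVE_VEC256 path above: per the STATIC_ASSERTs, the
 * first 8 bytes of a vlib_buffer_t are current_data (i16),
 * current_length (i16) and flags (u32), so u64x4_gather pulls that header
 * word from four buffers at once. Within each 4 x i16 lane group of adv4
 * the first lane adds +adv to current_data and the second adds -adv to
 * current_length, which is exactly what vlib_buffer_advance() does, while
 * flags4 ORs in the two *_HDR_OFFSET_VALID flags. The unadvanced (r) and
 * advanced (radv) copies are then blended so that l2_hdr_offset and
 * l3_hdr_offset, which are adjacent i16 fields, can be written with a
 * single 32-bit store per buffer.
 */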

static_always_inline void
eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;

  if (is_l3)
    vlib_buffer_advance (b[0], adv);
  b[0]->flags |= flags;
  if (!is_l3)
    vnet_buffer (b[0])->l2.l2_len = adv;
}

static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
                              u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}

static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
             sparse_vec_index (em->l3_next.input_next_by_type, etype));
}

typedef struct
{
  u64 tag, mask;
  u32 sw_if_index;
  u16 type, len, next;
  i16 adv;
  u8 err, n_tags;
  u64 n_packets, n_bytes;
} eth_input_tag_lookup_t;

static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
                              eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}

static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
                      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
                      u64 tag, u16 * next, vlib_buffer_t * b,
                      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
                      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
                                     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
        {
          l->type = clib_net_to_host_u16 (t[3]);
          l->n_tags = 2;
          matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
                                         SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
                                         qif, &new_sw_if_index, &l->err,
                                         &is_l2);
        }
      else
        {
          l->n_tags = 1;
          if (vlan1 == 0)
            {
              new_sw_if_index = hi->sw_if_index;
              l->err = ETHERNET_ERROR_NONE;
              matched = 1;
              is_l2 = main_is_l3 == 0;
            }
          else
            matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
                                           SUBINT_CONFIG_MATCH_1_TAG, mif,
                                           vif, qif, &new_sw_if_index,
                                           &l->err, &is_l2);
        }

      if (l->sw_if_index != new_sw_if_index)
        {
          eth_input_update_if_counters (vm, vnm, l);
          l->n_packets = 0;
          l->n_bytes = 0;
          l->sw_if_index = new_sw_if_index;
        }
      l->tag = tag;
      l->mask = (l->n_tags == 2) ?
        clib_net_to_host_u64 (0xffffffffffffffff) :
        clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
        l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
        l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
        l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
          l->n_tags * sizeof (ethernet_vlan_header_t);
      else
        l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
        l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
        l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
        l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
        l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
        l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
        l->next = em->redirect_l3_next;
      else
        {
          l->next = eth_input_next_by_type (l->type);
          if (l->next == ETHERNET_INPUT_NEXT_PUNT)
            l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
        }
    }

  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}
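
/*
 * Note on eth_input_tag_lookup() above: 'l' acts as a single-entry cache
 * keyed on the 8 bytes that follow the ethernet header. The full lookup
 * (vlan/qinq tables plus eth_identify_subint) is redone only when
 * (tag ^ l->tag) & l->mask is non-zero, i.e. when the outer TCI and
 * following ethertype (single-tagged) or all 8 bytes (double-tagged)
 * differ from the previous packet; otherwise the cached sw_if_index, next
 * node, advance and error are reused and only the per-subinterface
 * counters are updated.
 */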

#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
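
/*
 * Worked example for the two masks above (illustrative addresses only):
 * the DMAC is compared as the first 8 bytes of the frame, so DMAC_MASK
 * keeps the 48-bit destination address and zeroes the 2 trailing bytes
 * that already belong to the source address. DMAC_IGBIT selects the I/G
 * (group) bit, the least significant bit of the first address octet:
 * for broadcast ff:ff:ff:ff:ff:ff or an IPv4 multicast MAC such as
 * 01:00:5e:01:02:03 that bit is set, so is_dmac_bad() below never flags
 * multicast/broadcast frames even though they do not match the interface
 * address.
 */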

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
is_dmac_bad (u64 dmac, u64 hwaddr)
{
  u64 r0 = dmac & DMAC_MASK;
  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
}

static_always_inline u8
is_sec_dmac_bad (u64 dmac, u64 hwaddr)
{
  return ((dmac & DMAC_MASK) != hwaddr);
}

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  return dmac_bad[0];
}

static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}

/*
 * DMAC check for ethernet_input_inline()
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
                                  u64 * dmacs, u8 * dmacs_bad,
                                  u32 n_packets, ethernet_interface_t * ei,
                                  u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u8 bad = 0;

  ASSERT (0 == ei->address.zero);

  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      ethernet_interface_address_t *sec_addr;

      vec_foreach (sec_addr, ei->secondary_addrs)
        {
          ASSERT (0 == sec_addr->zero);
          hwaddr = sec_addr->as_u64;

          bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
                 eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
                                              dmacs_bad + 1));

          if (!bad)
            return;
        }
    }
}

static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
                                    u64 * dmacs, u8 * dmacs_bad,
                                    u32 n_packets, ethernet_interface_t * ei,
                                    u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

  ASSERT (0 == ei->address.zero);

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      ethernet_interface_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
        {
          u64 hwaddr = addr->as_u64;
          i32 n_left = n_packets;
          u64 *dmac = dmacs;
          u8 *dmac_bad = dmacs_bad;

          ASSERT (0 == addr->zero);

          bad = 0;

          while (n_left > 0)
            {
              int adv = 0;
              int n_bad;

              /* skip any that have already matched */
              if (!dmac_bad[0])
                {
                  dmac += 1;
                  dmac_bad += 1;
                  n_left -= 1;
                  continue;
                }

              n_bad = clib_min (4, n_left);

              /* If >= 4 left, compare 4 together */
              if (n_bad == 4)
                {
                  bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
                  adv = 4;
                  n_bad = 0;
                }

              /* handle individually */
              while (n_bad > 0)
                {
                  bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
                                                      dmac_bad + adv);
                  adv += 1;
                  n_bad -= 1;
                }

              dmac += adv;
              dmac_bad += adv;
              n_left -= adv;
            }

          if (!bad)             /* can stop looping if everything matched */
            break;
        }
    }
}

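/*
 * Note on eth_input_process_frame_dmac_check() above: same logic as the
 * two-packet variant, but run over a whole frame 8 (AVX2) or 4 DMACs at a
 * time. The unrolled loops may run slightly past n_packets; that appears
 * to stay within the VLIB_FRAME_SIZE-sized dmacs/dmacs_bad arrays of the
 * caller, and the extra dmacs_bad entries are never consumed.
 */
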
/* process frame of buffers, store ethertype into array and update
   buffer metadata fields depending on interface being l2 or l3 assuming that
   packets are untagged. For tagged packets those fields are updated later.
   Optionally store Destination MAC address and tag data into arrays
   for further processing */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
               "VLIB_FRAME_SIZE must be a multiple of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_hw_interface_t * hi,
                         u32 * buffer_indices, u32 n_packets, int main_is_l3,
                         int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
        eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
                                            ei, 1 /* have_sec_dmac */ );
      else
        eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
                                            ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered as slowpath, in l2 mode all untagged packets are
     considered as fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
        {
          u16x16 r = zero;
          u16x16 e16 = u16x16_load_unaligned (etype);
          if (main_is_l3)
            {
              r += (e16 == et16_ip4) & next16_ip4;
              r += (e16 == et16_ip6) & next16_ip6;
              r += (e16 == et16_mpls) & next16_mpls;
            }
          else
            r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
          u16x16_store_unaligned (r, next);

          if (!u16x16_is_all_zero (r == zero))
            {
              if (u16x16_is_all_zero (r))
                {
                  u16x16_store_unaligned (u16x16_splat (i) + stairs,
                                          slowpath_indices + n_slowpath);
                  n_slowpath += 16;
                }
              else
                {
                  for (int j = 0; j < 16; j++)
                    {
                      if (next[j] == 0)
                        slowpath_indices[n_slowpath++] = i + j;
                      else if (dmac_check && main_is_l3 && dmacs_bad[i + j])
                        {
                          next[j] = 0;
                          slowpath_indices[n_slowpath++] = i + j;
                        }
                    }
                }
            }
          else
            {
              if (dmac_check && main_is_l3)
                {
                  u8x16 dmac_bad = u8x16_load_unaligned (&dmacs_bad[i]);
                  if (!u8x16_is_all_zero (dmac_bad))
                    {
                      for (int j = 0; j < 16; j++)
                        if (dmacs_bad[i + j])
                          {
                            next[j] = 0;
                            slowpath_indices[n_slowpath++] = i + j;
                          }
                    }
                }
            }

          etype += 16;
          next += 16;
          n_left -= 16;
          i += 16;
          continue;
        }
#endif
      if (dmac_check && main_is_l3 && dmacs_bad[i])
        {
          next[0] = 0;
          slowpath_indices[n_slowpath++] = i;
        }
      else if (main_is_l3 && etype[0] == et_ip4)
        next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
        next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
        next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
               etype[0] != et_vlan && etype[0] != et_dot1ad)
        next[0] = next_l2;
      else
        {
          next[0] = 0;
          slowpath_indices[n_slowpath++] = i;
        }

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
        .mask = -1LL,
        .tag = tags[si[0]] ^ -1LL,
        .sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
        {
          i = si[0];
          u16 etype = etypes[i];

          if (etype == et_vlan)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
              eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
                                    &dot1q_lookup, dmacs_bad[i], 0,
                                    main_is_l3, dmac_check);
            }
          else if (etype == et_dot1ad)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
              eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
                                    &dot1ad_lookup, dmacs_bad[i], 1,
                                    main_is_l3, dmac_check);
            }
          else
            {
              /* untagged packet with not well known ethertype */
              if (last_unknown_etype != etype)
                {
                  last_unknown_etype = etype;
                  etype = clib_host_to_net_u16 (etype);
                  last_unknown_next = eth_input_next_by_type (etype);
                }
              if (dmac_check && main_is_l3 && dmacs_bad[i])
                {
                  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
                  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
                  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
                }
              else
                nexts[i] = last_unknown_next;
            }

          /* next */
          n_left--;
          si++;
        }

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}
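
/*
 * Note on eth_input_process_frame() above: pass 1 walks the frame once,
 * recording ethertype, the 8 bytes of potential tag data and (optionally)
 * the DMAC of every packet while advancing buffer metadata for the
 * untagged case; pass 2 resolves next nodes, 16 packets at a time with
 * AVX2 when available, pushing tagged frames, unknown ethertypes and DMAC
 * failures onto slowpath_indices. The slowpath then resolves those with
 * eth_input_tag_lookup() using separate dot1q and dot1ad caches before
 * everything is handed to vlib_buffer_enqueue_to_next().
 */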

static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
                      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;

  if (main_is_l3)
    {
      if (int_is_l3 ||          /* DMAC filter already done by NIC */
          ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
        {                       /* All L2 usage - DMAC check not needed */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 1, ip4_cksum_ok, 0);
        }
      else
        {                       /* DMAC check needed for L3 */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 1, ip4_cksum_ok, 1);
        }
      return;
    }
  else
    {
      if (hi->l3_if_count == 0)
        {                       /* All L2 usage - DMAC check not needed */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 0, ip4_cksum_ok, 0);
        }
      else
        {                       /* DMAC check needed for L3 */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 0, ip4_cksum_ok, 1);
        }
      return;
    }
}
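
/*
 * Note on eth_input_single_int() above: the software DMAC check is
 * skipped when the device already filters on MAC
 * (ETHERNET_INTERFACE_FLAG_STATUS_L3) or when the interface has no L3
 * users (l3_if_count == 0, with at least one L2 user required in the
 * main-is-L3 case), on the reasoning that in pure L2 use the destination
 * MAC is not required to match the interface address.
 */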
1156
1157static_always_inline void
1158ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
1159 vlib_frame_t * from_frame)
1160{
Damjan Marion8fb5add2021-03-04 18:41:59 +01001161 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001162 u32 *from, n_left;
BenoƮt Ganne98477922019-04-10 14:21:11 +02001163 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
Damjan Marion650223c2018-11-14 16:55:53 +01001164 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001165 from = vlib_frame_vector_args (from_frame);
1166 n_left = from_frame->n_vectors;
Damjan Marion650223c2018-11-14 16:55:53 +01001167
Dave Barach5ecd5a52019-02-25 15:27:28 -05001168 while (n_left)
Damjan Marion650223c2018-11-14 16:55:53 +01001169 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001170 ethernet_input_trace_t *t0;
1171 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
1172
1173 if (b0->flags & VLIB_BUFFER_IS_TRACED)
1174 {
1175 t0 = vlib_add_trace (vm, node, b0,
1176 sizeof (ethernet_input_trace_t));
1177 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
1178 sizeof (t0->packet_data));
1179 t0->frame_flags = from_frame->flags;
1180 clib_memcpy_fast (&t0->frame_data,
1181 vlib_frame_scalar_args (from_frame),
1182 sizeof (ethernet_input_frame_t));
1183 }
1184 from += 1;
1185 n_left -= 1;
Damjan Marion650223c2018-11-14 16:55:53 +01001186 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001187 }
1188
1189 /* rx pcap capture if enabled */
Damjan Marion8fb5add2021-03-04 18:41:59 +01001190 if (PREDICT_FALSE (vnm->pcap.pcap_rx_enable))
Dave Barach5ecd5a52019-02-25 15:27:28 -05001191 {
1192 u32 bi0;
Damjan Marion8fb5add2021-03-04 18:41:59 +01001193 vnet_pcap_t *pp = &vnm->pcap;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001194
1195 from = vlib_frame_vector_args (from_frame);
1196 n_left = from_frame->n_vectors;
1197 while (n_left > 0)
1198 {
1199 vlib_buffer_t *b0;
1200 bi0 = from[0];
1201 from++;
Dave Barach9137e542019-09-13 17:47:50 -04001202 n_left--;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001203 b0 = vlib_get_buffer (vm, bi0);
BenoƮt Ganne30a81952021-02-26 13:47:41 +01001204 if (vnet_is_packet_pcaped (pp, b0, ~0))
1205 pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
Dave Barach5ecd5a52019-02-25 15:27:28 -05001206 }
Damjan Marion650223c2018-11-14 16:55:53 +01001207 }
1208}
1209
1210static_always_inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001211ethernet_input_inline (vlib_main_t * vm,
1212 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001213 u32 * from, u32 n_packets,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001214 ethernet_input_variant_t variant)
1215{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001216 vnet_main_t *vnm = vnet_get_main ();
1217 ethernet_main_t *em = &ethernet_main;
1218 vlib_node_runtime_t *error_node;
Damjan Marion650223c2018-11-14 16:55:53 +01001219 u32 n_left_from, next_index, *to_next;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001220 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
Damjan Marion067cd622018-07-11 12:47:43 +02001221 u32 thread_index = vm->thread_index;
Dave Barachcfba1e22016-11-16 10:23:50 -05001222 u32 cached_sw_if_index = ~0;
1223 u32 cached_is_l2 = 0; /* shut up gcc */
John Lo1904c472017-03-10 17:15:22 -05001224 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
Matthew Smith42bde452019-11-18 09:35:24 -06001225 ethernet_interface_t *ei = NULL;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001226 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
1227 vlib_buffer_t **b = bufs;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001228
1229 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
1230 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
1231 else
1232 error_node = node;
1233
Damjan Marion650223c2018-11-14 16:55:53 +01001234 n_left_from = n_packets;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001235
1236 next_index = node->cached_next_index;
1237 stats_sw_if_index = node->runtime_data[0];
1238 stats_n_packets = stats_n_bytes = 0;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001239 vlib_get_buffers (vm, from, bufs, n_left_from);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001240
1241 while (n_left_from > 0)
1242 {
1243 u32 n_left_to_next;
1244
1245 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1246
1247 while (n_left_from >= 4 && n_left_to_next >= 2)
1248 {
1249 u32 bi0, bi1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001250 vlib_buffer_t *b0, *b1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001251 u8 next0, next1, error0, error1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001252 u16 type0, orig_type0, type1, orig_type1;
1253 u16 outer_id0, inner_id0, outer_id1, inner_id1;
1254 u32 match_flags0, match_flags1;
1255 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
1256 new_sw_if_index1, len1;
1257 vnet_hw_interface_t *hi0, *hi1;
1258 main_intf_t *main_intf0, *main_intf1;
1259 vlan_intf_t *vlan_intf0, *vlan_intf1;
1260 qinq_intf_t *qinq_intf0, *qinq_intf1;
1261 u32 is_l20, is_l21;
Dave Barachcfba1e22016-11-16 10:23:50 -05001262 ethernet_header_t *e0, *e1;
Matthew Smith42bde452019-11-18 09:35:24 -06001263 u64 dmacs[2];
1264 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001265
1266 /* Prefetch next iteration. */
1267 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001268 vlib_prefetch_buffer_header (b[2], STORE);
1269 vlib_prefetch_buffer_header (b[3], STORE);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001270
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001271 CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
1272 CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001273 }
1274
1275 bi0 = from[0];
1276 bi1 = from[1];
1277 to_next[0] = bi0;
1278 to_next[1] = bi1;
1279 from += 2;
1280 to_next += 2;
1281 n_left_to_next -= 2;
1282 n_left_from -= 2;
1283
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001284 b0 = b[0];
1285 b1 = b[1];
1286 b += 2;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001287
1288 error0 = error1 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001289 e0 = vlib_buffer_get_current (b0);
1290 type0 = clib_net_to_host_u16 (e0->type);
1291 e1 = vlib_buffer_get_current (b1);
1292 type1 = clib_net_to_host_u16 (e1->type);
1293
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001294 /* Set the L2 header offset for all packets */
1295 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1296 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
1297 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1298 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1299
John Locc532852016-12-14 15:42:45 -05001300 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001301 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
Damjan Marionc6969b52018-02-19 12:14:06 +01001302 && !ethernet_frame_is_any_tagged_x2 (type0,
1303 type1)))
Dave Barachcfba1e22016-11-16 10:23:50 -05001304 {
1305 main_intf_t *intf0;
1306 subint_config_t *subint0;
1307 u32 sw_if_index0, sw_if_index1;
1308
1309 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1310 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1311 is_l20 = cached_is_l2;
1312
1313 /* This is probably wholly unnecessary */
1314 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
1315 goto slowpath;
1316
John Lo1904c472017-03-10 17:15:22 -05001317 /* Now sw_if_index0 == sw_if_index1 */
Dave Barachcfba1e22016-11-16 10:23:50 -05001318 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1319 {
1320 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001321 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001322 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001323 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001324 subint0 = &intf0->untagged_subint;
1325 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1326 }
John Lo7714b302016-12-20 16:59:02 -05001327
Dave Barachcfba1e22016-11-16 10:23:50 -05001328 if (PREDICT_TRUE (is_l20 != 0))
1329 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001330 vnet_buffer (b0)->l3_hdr_offset =
1331 vnet_buffer (b0)->l2_hdr_offset +
1332 sizeof (ethernet_header_t);
1333 vnet_buffer (b1)->l3_hdr_offset =
1334 vnet_buffer (b1)->l2_hdr_offset +
1335 sizeof (ethernet_header_t);
1336 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1337 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001338 next0 = em->l2_next;
1339 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001340 next1 = em->l2_next;
1341 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001342 }
John Locc532852016-12-14 15:42:45 -05001343 else
1344 {
Dave Barach99c6dc62021-02-15 12:46:47 -05001345 if (ei && (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3))
John Lo4a302ee2020-05-12 22:34:39 -04001346 goto skip_dmac_check01;
1347
Matthew Smith42bde452019-11-18 09:35:24 -06001348 dmacs[0] = *(u64 *) e0;
1349 dmacs[1] = *(u64 *) e1;
1350
1351 if (ei && vec_len (ei->secondary_addrs))
1352 ethernet_input_inline_dmac_check (hi, dmacs,
1353 dmacs_bad,
1354 2 /* n_packets */ ,
1355 ei,
1356 1 /* have_sec_dmac */ );
1357 else
1358 ethernet_input_inline_dmac_check (hi, dmacs,
1359 dmacs_bad,
1360 2 /* n_packets */ ,
1361 ei,
1362 0 /* have_sec_dmac */ );
1363
1364 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001365 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001366 if (dmacs_bad[1])
John Lo1904c472017-03-10 17:15:22 -05001367 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001368
John Lo4a302ee2020-05-12 22:34:39 -04001369 skip_dmac_check01:
John Lob14826e2018-04-18 15:52:23 -04001370 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001371 determine_next_node (em, variant, 0, type0, b0,
1372 &error0, &next0);
John Lob14826e2018-04-18 15:52:23 -04001373 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001374 determine_next_node (em, variant, 0, type1, b1,
1375 &error1, &next1);
John Locc532852016-12-14 15:42:45 -05001376 }
1377 goto ship_it01;
Dave Barachcfba1e22016-11-16 10:23:50 -05001378 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001379
John Locc532852016-12-14 15:42:45 -05001380 /* Slow-path for the tagged case */
1381 slowpath:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001382 parse_header (variant,
1383 b0,
1384 &type0,
1385 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001386
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001387 parse_header (variant,
1388 b1,
1389 &type1,
1390 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001391
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001392 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1393 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001394
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001395 eth_vlan_table_lookups (em,
1396 vnm,
1397 old_sw_if_index0,
1398 orig_type0,
1399 outer_id0,
1400 inner_id0,
1401 &hi0,
1402 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001403
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001404 eth_vlan_table_lookups (em,
1405 vnm,
1406 old_sw_if_index1,
1407 orig_type1,
1408 outer_id1,
1409 inner_id1,
1410 &hi1,
1411 &main_intf1, &vlan_intf1, &qinq_intf1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001412
Ivan Shvedunov72869432020-10-15 13:19:35 +03001413 identify_subint (em,
1414 hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001415 b0,
1416 match_flags0,
1417 main_intf0,
1418 vlan_intf0,
1419 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001420
Ivan Shvedunov72869432020-10-15 13:19:35 +03001421 identify_subint (em,
1422 hi1,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001423 b1,
1424 match_flags1,
1425 main_intf1,
1426 vlan_intf1,
1427 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001428
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001429 // Save RX sw_if_index for later nodes
1430 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1431 error0 !=
1432 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1433 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1434 error1 !=
1435 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001436
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001437 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
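	  // As in the single-packet loop below, counts for consecutive
	  // packets landing on the same sub-interface are accumulated in
	  // stats_n_packets / stats_n_bytes and only flushed to the
	  // combined counters when the sub-interface changes (with a final
	  // flush after the frame has been processed).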
1438 if (((new_sw_if_index0 != ~0)
1439 && (new_sw_if_index0 != old_sw_if_index0))
1440 || ((new_sw_if_index1 != ~0)
1441 && (new_sw_if_index1 != old_sw_if_index1)))
1442 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001443
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001444 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001445 - vnet_buffer (b0)->l2_hdr_offset;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001446 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001447 - vnet_buffer (b1)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001448
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001449 stats_n_packets += 2;
1450 stats_n_bytes += len0 + len1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001451
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001452 if (PREDICT_FALSE
1453 (!(new_sw_if_index0 == stats_sw_if_index
1454 && new_sw_if_index1 == stats_sw_if_index)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001455 {
1456 stats_n_packets -= 2;
1457 stats_n_bytes -= len0 + len1;
1458
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001459 if (new_sw_if_index0 != old_sw_if_index0
1460 && new_sw_if_index0 != ~0)
1461 vlib_increment_combined_counter (vnm->
1462 interface_main.combined_sw_if_counters
1463 +
1464 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001465 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001466 new_sw_if_index0, 1,
1467 len0);
1468 if (new_sw_if_index1 != old_sw_if_index1
1469 && new_sw_if_index1 != ~0)
1470 vlib_increment_combined_counter (vnm->
1471 interface_main.combined_sw_if_counters
1472 +
1473 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001474 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001475 new_sw_if_index1, 1,
1476 len1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001477
1478 if (new_sw_if_index0 == new_sw_if_index1)
1479 {
1480 if (stats_n_packets > 0)
1481 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001482 vlib_increment_combined_counter
1483 (vnm->interface_main.combined_sw_if_counters
1484 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001485 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001486 stats_sw_if_index,
1487 stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001488 stats_n_packets = stats_n_bytes = 0;
1489 }
1490 stats_sw_if_index = new_sw_if_index0;
1491 }
1492 }
1493 }
1494
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001495 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1496 is_l20 = is_l21 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001497
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001498 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1499 &next0);
1500 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1501 &next1);
1502
John Lo1904c472017-03-10 17:15:22 -05001503 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001504 b0->error = error_node->errors[error0];
1505 b1->error = error_node->errors[error1];
1506
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001507 // verify speculative enqueue
1508 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1509 n_left_to_next, bi0, bi1, next0,
1510 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001511 }
1512
1513 while (n_left_from > 0 && n_left_to_next > 0)
1514 {
1515 u32 bi0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001516 vlib_buffer_t *b0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001517 u8 error0, next0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001518 u16 type0, orig_type0;
1519 u16 outer_id0, inner_id0;
1520 u32 match_flags0;
1521 u32 old_sw_if_index0, new_sw_if_index0, len0;
1522 vnet_hw_interface_t *hi0;
1523 main_intf_t *main_intf0;
1524 vlan_intf_t *vlan_intf0;
1525 qinq_intf_t *qinq_intf0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001526 ethernet_header_t *e0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001527 u32 is_l20;
Matthew Smith42bde452019-11-18 09:35:24 -06001528 u64 dmacs[2];
1529 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001530
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001531 // Prefetch next iteration
1532 if (n_left_from > 1)
1533 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001534 vlib_prefetch_buffer_header (b[1], STORE);
Damjan Marionaf7fb042021-07-15 11:54:41 +02001535 clib_prefetch_load (b[1]->data);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001536 }
1537
1538 bi0 = from[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001539 to_next[0] = bi0;
1540 from += 1;
1541 to_next += 1;
1542 n_left_from -= 1;
1543 n_left_to_next -= 1;
1544
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001545 b0 = b[0];
1546 b += 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001547
1548 error0 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001549 e0 = vlib_buffer_get_current (b0);
1550 type0 = clib_net_to_host_u16 (e0->type);
1551
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001552 /* Set the L2 header offset for all packets */
1553 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1554 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1555
John Locc532852016-12-14 15:42:45 -05001556 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001557 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1558 && !ethernet_frame_is_tagged (type0)))
1559 {
1560 main_intf_t *intf0;
1561 subint_config_t *subint0;
1562 u32 sw_if_index0;
1563
1564 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1565 is_l20 = cached_is_l2;
1566
1567 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1568 {
1569 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001570 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001571 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001572 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001573 subint0 = &intf0->untagged_subint;
1574 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1575 }
John Lo7714b302016-12-20 16:59:02 -05001576
John Lo7714b302016-12-20 16:59:02 -05001577
Dave Barachcfba1e22016-11-16 10:23:50 -05001578 if (PREDICT_TRUE (is_l20 != 0))
1579 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001580 vnet_buffer (b0)->l3_hdr_offset =
1581 vnet_buffer (b0)->l2_hdr_offset +
1582 sizeof (ethernet_header_t);
1583 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001584 next0 = em->l2_next;
1585 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001586 }
John Locc532852016-12-14 15:42:45 -05001587 else
1588 {
Dave Barach99c6dc62021-02-15 12:46:47 -05001589 if (ei && ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
John Lo4a302ee2020-05-12 22:34:39 -04001590 goto skip_dmac_check0;
1591
Matthew Smith42bde452019-11-18 09:35:24 -06001592 dmacs[0] = *(u64 *) e0;
1593
Andrew Yourtchenko76b8aa02022-08-23 15:48:59 +00001594 if (ei)
1595 {
1596 if (vec_len (ei->secondary_addrs))
1597 ethernet_input_inline_dmac_check (
1598 hi, dmacs, dmacs_bad, 1 /* n_packets */, ei,
1599 1 /* have_sec_dmac */);
1600 else
1601 ethernet_input_inline_dmac_check (
1602 hi, dmacs, dmacs_bad, 1 /* n_packets */, ei,
1603 0 /* have_sec_dmac */);
Matthew Smith42bde452019-11-18 09:35:24 -06001604
Andrew Yourtchenko76b8aa02022-08-23 15:48:59 +00001605 if (dmacs_bad[0])
1606 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1607 }
Matthew Smith42bde452019-11-18 09:35:24 -06001608
John Lo4a302ee2020-05-12 22:34:39 -04001609 skip_dmac_check0:
Andrew Yourtchenkoe78bca12018-10-10 16:15:55 +02001610 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001611 determine_next_node (em, variant, 0, type0, b0,
1612 &error0, &next0);
John Locc532852016-12-14 15:42:45 -05001613 }
1614 goto ship_it0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001615 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001616
John Locc532852016-12-14 15:42:45 -05001617 /* Slow-path for the tagged case */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001618 parse_header (variant,
1619 b0,
1620 &type0,
1621 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001622
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001623 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001624
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001625 eth_vlan_table_lookups (em,
1626 vnm,
1627 old_sw_if_index0,
1628 orig_type0,
1629 outer_id0,
1630 inner_id0,
1631 &hi0,
1632 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001633
Ivan Shvedunov72869432020-10-15 13:19:35 +03001634 identify_subint (em,
1635 hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001636 b0,
1637 match_flags0,
1638 main_intf0,
1639 vlan_intf0,
1640 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001641
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001642 // Save RX sw_if_index for later nodes
1643 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1644 error0 !=
1645 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001646
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001647 // Increment subinterface stats
1648 // Note that interface-level counters have already been incremented
1649 // prior to calling this function. Thus only subinterface counters
1650 // are incremented here.
1651 //
Damjan Marion607de1a2016-08-16 22:53:54 +02001652 // Interface level counters include packets received on the main
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001653 // interface and all subinterfaces. Subinterface level counters
1654 // include only those packets received on that subinterface
Ed Warnickecb9cada2015-12-08 15:45:58 -07001655 // Increment stats if the subint is valid and it is not the main intf
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001656 if ((new_sw_if_index0 != ~0)
1657 && (new_sw_if_index0 != old_sw_if_index0))
1658 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001659
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001660 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001661 - vnet_buffer (b0)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001662
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001663 stats_n_packets += 1;
1664 stats_n_bytes += len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001665
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001666 // Batch stat increments from the same subinterface so counters
Damjan Marion607de1a2016-08-16 22:53:54 +02001667 // don't need to be incremented for every packet.
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001668 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1669 {
1670 stats_n_packets -= 1;
1671 stats_n_bytes -= len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001672
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001673 if (new_sw_if_index0 != ~0)
1674 vlib_increment_combined_counter
1675 (vnm->interface_main.combined_sw_if_counters
1676 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001677 thread_index, new_sw_if_index0, 1, len0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001678 if (stats_n_packets > 0)
1679 {
1680 vlib_increment_combined_counter
1681 (vnm->interface_main.combined_sw_if_counters
1682 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001683 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001684 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1685 stats_n_packets = stats_n_bytes = 0;
1686 }
1687 stats_sw_if_index = new_sw_if_index0;
1688 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001689 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001690
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001691 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1692 is_l20 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001693
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001694 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1695 &next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001696
John Lo1904c472017-03-10 17:15:22 -05001697 ship_it0:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001698 b0->error = error_node->errors[error0];
1699
1700 // verify speculative enqueue
Ed Warnickecb9cada2015-12-08 15:45:58 -07001701 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1702 to_next, n_left_to_next,
1703 bi0, next0);
1704 }
1705
1706 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1707 }
1708
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001709 // Increment any remaining batched stats
1710 if (stats_n_packets > 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001711 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001712 vlib_increment_combined_counter
1713 (vnm->interface_main.combined_sw_if_counters
1714 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001715 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001716 node->runtime_data[0] = stats_sw_if_index;
1717 }
Damjan Marion650223c2018-11-14 16:55:53 +01001718}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001719
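/*
 * Device input nodes can let ethernet-input skip per-packet interface
 * handling by tagging an entire frame as coming from a single interface.
 * A rough, illustrative sketch of the driver side (variable names are
 * assumptions, not taken from any particular driver):
 *
 *   vlib_frame_t *f = ...;             // frame enqueued to ethernet-input
 *   ethernet_input_frame_t *ef = vlib_frame_scalar_args (f);
 *   ef->sw_if_index = rx_sw_if_index;
 *   ef->hw_if_index = rx_hw_if_index;
 *   f->flags |= ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
 *   if (hw_validated_all_ip4_checksums)
 *     f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
 *
 * Frames without ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX take the generic
 * ethernet_input_inline () path above.
 */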
Damjan Marion5beecec2018-09-10 13:09:21 +02001720VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1721 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001722 vlib_frame_t * frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001723{
Damjan Marion650223c2018-11-14 16:55:53 +01001724 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001725 u32 *from = vlib_frame_vector_args (frame);
1726 u32 n_packets = frame->n_vectors;
1727
1728 ethernet_input_trace (vm, node, frame);
1729
1730 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1731 {
Damjan Marion650223c2018-11-14 16:55:53 +01001732 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
Damjan Marion650223c2018-11-14 16:55:53 +01001733 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001734 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1735 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
Damjan Marion650223c2018-11-14 16:55:53 +01001736 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001737 else
1738 ethernet_input_inline (vm, node, from, n_packets,
1739 ETHERNET_INPUT_VARIANT_ETHERNET);
Damjan Marion650223c2018-11-14 16:55:53 +01001740 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001741}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001742
Damjan Marion5beecec2018-09-10 13:09:21 +02001743VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1744 vlib_node_runtime_t * node,
1745 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001746{
Damjan Marion650223c2018-11-14 16:55:53 +01001747 u32 *from = vlib_frame_vector_args (from_frame);
1748 u32 n_packets = from_frame->n_vectors;
1749 ethernet_input_trace (vm, node, from_frame);
1750 ethernet_input_inline (vm, node, from, n_packets,
1751 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1752 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001753}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001754
Damjan Marion5beecec2018-09-10 13:09:21 +02001755VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1756 vlib_node_runtime_t * node,
1757 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001758{
Damjan Marion650223c2018-11-14 16:55:53 +01001759 u32 *from = vlib_frame_vector_args (from_frame);
1760 u32 n_packets = from_frame->n_vectors;
1761 ethernet_input_trace (vm, node, from_frame);
1762 ethernet_input_inline (vm, node, from, n_packets,
1763 ETHERNET_INPUT_VARIANT_NOT_L2);
1764 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001765}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001766
1767
1768// Return the subinterface config struct for the given sw_if_index
1769// Also return via parameter the appropriate match flags for the
1770// configured number of tags.
1771// On error (unsupported or not ethernet) return 0.
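// For example (illustrative): an exact-match dot1q sub-interface yields
// SUBINT_CONFIG_MATCH_1_TAG only, a non-exact-match one additionally
// accepts extra inner tags (MATCH_2_TAG | MATCH_3_TAG), and an untagged
// or default configuration maps to the main interface's untagged or
// default subint, as coded below.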
1772static subint_config_t *
1773ethernet_sw_interface_get_config (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001774 u32 sw_if_index,
1775 u32 * flags, u32 * unsupported)
1776{
1777 ethernet_main_t *em = &ethernet_main;
1778 vnet_hw_interface_t *hi;
1779 vnet_sw_interface_t *si;
1780 main_intf_t *main_intf;
1781 vlan_table_t *vlan_table;
1782 qinq_table_t *qinq_table;
1783 subint_config_t *subint = 0;
1784
Ed Warnickecb9cada2015-12-08 15:45:58 -07001785 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1786
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001787 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1788 {
1789 *unsupported = 0;
1790 goto done; // non-ethernet interface
1791 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001792
1793 // ensure there's an entry for the main intf (shouldn't really be necessary)
1794 vec_validate (em->main_intfs, hi->hw_if_index);
1795 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1796
1797 // Locate the subint for the given ethernet config
1798 si = vnet_get_sw_interface (vnm, sw_if_index);
1799
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001800 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1801 {
1802 p2p_ethernet_main_t *p2pm = &p2p_main;
1803 u32 p2pe_sw_if_index =
1804 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1805 if (p2pe_sw_if_index == ~0)
1806 {
1807 pool_get (p2pm->p2p_subif_pool, subint);
1808 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1809 }
1810 else
1811 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1812 *flags = SUBINT_CONFIG_P2P;
1813 }
Neale Ranns17ff3c12018-07-04 10:24:24 -07001814 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1815 {
1816 pipe_t *pipe;
1817
1818 pipe = pipe_get (sw_if_index);
1819 subint = &pipe->subint;
1820 *flags = SUBINT_CONFIG_P2P;
1821 }
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001822 else if (si->sub.eth.flags.default_sub)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001823 {
1824 subint = &main_intf->default_subint;
Mike Bly88076742018-09-24 10:13:06 -07001825 *flags = SUBINT_CONFIG_MATCH_1_TAG |
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001826 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1827 }
1828 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1829 {
1830 // if no flags are set then this is a main interface
1831 // so treat as untagged
1832 subint = &main_intf->untagged_subint;
1833 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1834 }
1835 else
1836 {
1837 // one or two tags
1838 // first get the vlan table
1839 if (si->sub.eth.flags.dot1ad)
1840 {
1841 if (main_intf->dot1ad_vlans == 0)
1842 {
1843 // Allocate a vlan table from the pool
1844 pool_get (em->vlan_pool, vlan_table);
1845 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1846 }
1847 else
1848 {
1849 // Get ptr to existing vlan table
1850 vlan_table =
1851 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1852 }
1853 }
1854 else
1855 { // dot1q
1856 if (main_intf->dot1q_vlans == 0)
1857 {
1858 // Allocate a vlan table from the pool
1859 pool_get (em->vlan_pool, vlan_table);
1860 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1861 }
1862 else
1863 {
1864 // Get ptr to existing vlan table
1865 vlan_table =
1866 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
1867 }
1868 }
1869
1870 if (si->sub.eth.flags.one_tag)
1871 {
1872 *flags = si->sub.eth.flags.exact_match ?
1873 SUBINT_CONFIG_MATCH_1_TAG :
1874 (SUBINT_CONFIG_MATCH_1_TAG |
1875 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1876
1877 if (si->sub.eth.flags.outer_vlan_id_any)
1878 {
1879 // not implemented yet
1880 *unsupported = 1;
1881 goto done;
1882 }
1883 else
1884 {
1885 // a single vlan, a common case
1886 subint =
1887 &vlan_table->vlans[si->sub.eth.
1888 outer_vlan_id].single_tag_subint;
1889 }
1890
1891 }
1892 else
1893 {
1894 // Two tags
1895 *flags = si->sub.eth.flags.exact_match ?
1896 SUBINT_CONFIG_MATCH_2_TAG :
1897 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1898
1899 if (si->sub.eth.flags.outer_vlan_id_any
1900 && si->sub.eth.flags.inner_vlan_id_any)
1901 {
1902 // not implemented yet
1903 *unsupported = 1;
1904 goto done;
1905 }
1906
1907 if (si->sub.eth.flags.inner_vlan_id_any)
1908 {
1909 // a specific outer and "any" inner
1910 // don't need a qinq table for this
1911 subint =
1912 &vlan_table->vlans[si->sub.eth.
1913 outer_vlan_id].inner_any_subint;
1914 if (si->sub.eth.flags.exact_match)
1915 {
1916 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1917 }
1918 else
1919 {
1920 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1921 SUBINT_CONFIG_MATCH_3_TAG;
1922 }
1923 }
1924 else
1925 {
Nathan Skrzypczak2c77ae42021-09-29 15:36:51 +02001926 // a specific outer + specific inner vlan id, a common case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001927
1928 // get the qinq table
1929 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1930 {
1931 // Allocate a qinq table from the pool
1932 pool_get (em->qinq_pool, qinq_table);
1933 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1934 qinq_table - em->qinq_pool;
1935 }
1936 else
1937 {
1938 // Get ptr to existing qinq table
1939 qinq_table =
1940 vec_elt_at_index (em->qinq_pool,
1941 vlan_table->vlans[si->sub.
1942 eth.outer_vlan_id].
1943 qinqs);
1944 }
1945 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1946 }
1947 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001948 }
1949
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001950done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001951 return subint;
1952}
1953
Damjan Marion5beecec2018-09-10 13:09:21 +02001954static clib_error_t *
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001955ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001956{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001957 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001958 u32 placeholder_flags;
1959 u32 placeholder_unsup;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001960 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001961
1962 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001963 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001964 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1965 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001966
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001967 if (subint == 0)
1968 {
1969 // not implemented yet or not ethernet
1970 goto done;
1971 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001972
1973 subint->sw_if_index =
1974 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1975
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001976done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001977 return error;
1978}
1979
1980VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1981
1982
Damjan Marion5beecec2018-09-10 13:09:21 +02001983#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -07001984// Set the L2/L3 mode for the subinterface
1985void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001986ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001987{
1988 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001989 u32 placeholder_flags;
1990 u32 placeholder_unsup;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001991 int is_port;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001992 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001993
1994 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1995
1996 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001997 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001998 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1999 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002000
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002001 if (subint == 0)
2002 {
2003 // unimplemented or not ethernet
2004 goto done;
2005 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002006
2007 // Double check that the config we found is for our interface (or the interface is down)
2008 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2009
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002010 if (l2)
2011 {
2012 subint->flags |= SUBINT_CONFIG_L2;
2013 if (is_port)
2014 subint->flags |=
2015 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
2016 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
2017 }
2018 else
2019 {
2020 subint->flags &= ~SUBINT_CONFIG_L2;
2021 if (is_port)
2022 subint->flags &=
2023 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
2024 | SUBINT_CONFIG_MATCH_3_TAG);
2025 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002026
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002027done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002028 return;
2029}
2030
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002031/*
2032 * Set the L2/L3 mode for the subinterface regardless of port
2033 */
2034void
2035ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002036 u32 sw_if_index, u32 l2)
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002037{
2038 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04002039 u32 placeholder_flags;
2040 u32 placeholder_unsup;
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002041
2042 /* Find the config for this subinterface */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002043 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04002044 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
2045 &placeholder_unsup);
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002046
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002047 if (subint == 0)
2048 {
2049 /* unimplemented or not ethernet */
2050 goto done;
2051 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002052
2053 /*
2054 * Double check that the config we found is for our interface (or the
2055 * interface is down)
2056 */
2057 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2058
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002059 if (l2)
2060 {
2061 subint->flags |= SUBINT_CONFIG_L2;
2062 }
2063 else
2064 {
2065 subint->flags &= ~SUBINT_CONFIG_L2;
2066 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002067
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002068done:
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002069 return;
2070}
Damjan Marion5beecec2018-09-10 13:09:21 +02002071#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07002072
2073static clib_error_t *
2074ethernet_sw_interface_add_del (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002075 u32 sw_if_index, u32 is_create)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002076{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002077 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002078 subint_config_t *subint;
2079 u32 match_flags;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002080 u32 unsupported = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002081
2082 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002083 subint =
2084 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
2085 &unsupported);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002086
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002087 if (subint == 0)
2088 {
2089 // not implemented yet or not ethernet
2090 if (unsupported)
2091 {
Damjan Marion607de1a2016-08-16 22:53:54 +02002092 // this is the NYI case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002093 error = clib_error_return (0, "not implemented yet");
2094 }
2095 goto done;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002096 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002097
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002098 if (!is_create)
2099 {
2100 subint->flags = 0;
2101 return error;
2102 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002103
2104 // Initialize the subint
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002105 if (subint->flags & SUBINT_CONFIG_VALID)
2106 {
2107 // Error vlan already in use
2108 error = clib_error_return (0, "vlan is already in use");
2109 }
2110 else
2111 {
Neale Ranns17ff3c12018-07-04 10:24:24 -07002112 // Note that config is L3 by default
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002113 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2114 subint->sw_if_index = ~0; // because interfaces are initially down
2115 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002116
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002117done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002118 return error;
2119}
2120
2121VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2122
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002123static char *ethernet_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002124#define ethernet_error(n,c,s) s,
2125#include "error.def"
2126#undef ethernet_error
2127};
2128
2129VLIB_REGISTER_NODE (ethernet_input_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002130 .name = "ethernet-input",
2131 /* Takes a vector of packets. */
2132 .vector_size = sizeof (u32),
Damjan Marion650223c2018-11-14 16:55:53 +01002133 .scalar_size = sizeof (ethernet_input_frame_t),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002134 .n_errors = ETHERNET_N_ERROR,
2135 .error_strings = ethernet_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002136 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2137 .next_nodes = {
2138#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2139 foreach_ethernet_input_next
2140#undef _
2141 },
Ed Warnickecb9cada2015-12-08 15:45:58 -07002142 .format_buffer = format_ethernet_header_with_length,
2143 .format_trace = format_ethernet_input_trace,
2144 .unformat_buffer = unformat_ethernet_header,
2145};
2146
Damjan Marion5beecec2018-09-10 13:09:21 +02002147VLIB_REGISTER_NODE (ethernet_input_type_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002148 .name = "ethernet-input-type",
2149 /* Takes a vector of packets. */
2150 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002151 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2152 .next_nodes = {
2153#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2154 foreach_ethernet_input_next
2155#undef _
2156 },
2157};
2158
Damjan Marion5beecec2018-09-10 13:09:21 +02002159VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002160 .name = "ethernet-input-not-l2",
2161 /* Takes a vector of packets. */
2162 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002163 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2164 .next_nodes = {
2165#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2166 foreach_ethernet_input_next
2167#undef _
2168 },
2169};
2170
Damjan Marion5beecec2018-09-10 13:09:21 +02002171#ifndef CLIB_MARCH_VARIANT
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002172void
2173ethernet_set_rx_redirect (vnet_main_t * vnm,
2174 vnet_hw_interface_t * hi, u32 enable)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002175{
2176 // Insure all packets go to ethernet-input (i.e. untagged ipv4 packets
2177 // don't go directly to ip4-input)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002178 vnet_hw_interface_rx_redirect_to_node
2179 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002180}
2181
2182
2183/*
2184 * Initialization and registration for the next_by_ethernet structure
2185 */
2186
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002187clib_error_t *
2188next_by_ethertype_init (next_by_ethertype_t * l3_next)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002189{
2190 l3_next->input_next_by_type = sparse_vec_new
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002191 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002192 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2193
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002194 vec_validate (l3_next->sparse_index_by_input_next_index,
2195 ETHERNET_INPUT_NEXT_DROP);
2196 vec_validate (l3_next->sparse_index_by_input_next_index,
2197 ETHERNET_INPUT_NEXT_PUNT);
2198 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2199 SPARSE_VEC_INVALID_INDEX;
2200 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2201 SPARSE_VEC_INVALID_INDEX;
2202
Damjan Marion607de1a2016-08-16 22:53:54 +02002203 /*
2204 * Make sure we don't wipe out an ethernet registration by mistake
Dave Barach1f49ed62016-02-24 11:29:06 -05002205 * Can happen if init function ordering constraints are missing.
2206 */
2207 if (CLIB_DEBUG > 0)
2208 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002209 ethernet_main_t *em = &ethernet_main;
2210 ASSERT (em->next_by_ethertype_register_called == 0);
Dave Barach1f49ed62016-02-24 11:29:06 -05002211 }
2212
Ed Warnickecb9cada2015-12-08 15:45:58 -07002213 return 0;
2214}
2215
2216// Add an ethertype -> next index mapping to the structure
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002217clib_error_t *
2218next_by_ethertype_register (next_by_ethertype_t * l3_next,
2219 u32 ethertype, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002220{
2221 u32 i;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002222 u16 *n;
2223 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002224
Dave Barach1f49ed62016-02-24 11:29:06 -05002225 if (CLIB_DEBUG > 0)
2226 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002227 ethernet_main_t *em = &ethernet_main;
Dave Barach1f49ed62016-02-24 11:29:06 -05002228 em->next_by_ethertype_register_called = 1;
2229 }
2230
Ed Warnickecb9cada2015-12-08 15:45:58 -07002231 /* Setup ethernet type -> next index sparse vector mapping. */
2232 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2233 n[0] = next_index;
2234
2235 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2236 is updated. */
2237 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2238 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002239 l3_next->
2240 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002241
2242 // do not allow the cached next index's to be updated if L3
2243 // redirect is enabled, as it will have overwritten them
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002244 if (!em->redirect_l3)
2245 {
2246 // Cache common ethertypes directly
2247 if (ethertype == ETHERNET_TYPE_IP4)
2248 {
2249 l3_next->input_next_ip4 = next_index;
2250 }
2251 else if (ethertype == ETHERNET_TYPE_IP6)
2252 {
2253 l3_next->input_next_ip6 = next_index;
2254 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002255 else if (ethertype == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002256 {
2257 l3_next->input_next_mpls = next_index;
2258 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002259 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002260 return 0;
2261}
2262
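/*
 * Attach the ethernet formatting / unformatting helpers to a graph
 * node so packet traces and the packet generator can print and parse
 * ethernet headers for it.
 */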
Dave Barachf8d50682019-05-14 18:01:44 -04002263void
Neale Ranns68d48d92021-06-03 14:59:47 +00002264ethernet_setup_node (vlib_main_t *vm, u32 node_index)
2265{
2266 vlib_node_t *n = vlib_get_node (vm, node_index);
2267 pg_node_t *pn = pg_get_node (node_index);
2268
2269 n->format_buffer = format_ethernet_header_with_length;
2270 n->unformat_buffer = unformat_ethernet_header;
2271 pn->unformat_edit = unformat_pg_ethernet_header;
2272}
2273
2274void
Dave Barachf8d50682019-05-14 18:01:44 -04002275ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002276{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002277 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2278 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002279
2280 ethernet_setup_node (vm, ethernet_input_node.index);
2281 ethernet_setup_node (vm, ethernet_input_type_node.index);
2282 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2283
2284 next_by_ethertype_init (&em->l3_next);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002285
Ed Warnickecb9cada2015-12-08 15:45:58 -07002286 // Initialize pools and vector for vlan parsing
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002287 vec_validate (em->main_intfs, 10); // 10 main interfaces
2288 pool_alloc (em->vlan_pool, 10);
2289 pool_alloc (em->qinq_pool, 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002290
2291 // The first vlan pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002292 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002293 // The first qinq pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002294 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002295}
2296
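/*
 * Register an L3 protocol node for a given ethertype and add it as a
 * next node of all three ethernet input variants.  Typically called
 * from a protocol's init path, e.g. (illustrative only):
 *
 *   ethernet_register_input_type (vm, ETHERNET_TYPE_IP4,
 *                                 ip4_input_node.index);
 */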
Ed Warnickecb9cada2015-12-08 15:45:58 -07002297void
2298ethernet_register_input_type (vlib_main_t * vm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002299 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002300{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002301 ethernet_main_t *em = &ethernet_main;
2302 ethernet_type_info_t *ti;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002303 u32 i;
2304
2305 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002306 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002307 if (error)
2308 clib_error_report (error);
2309 }
2310
2311 ti = ethernet_get_type_info (em, type);
Dave Barach4bda2d92019-07-03 15:21:50 -04002312 if (ti == 0)
2313 {
2314 clib_warning ("type_info NULL for type %d", type);
2315 return;
2316 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002317 ti->node_index = node_index;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002318 ti->next_index = vlib_node_add_next (vm,
2319 ethernet_input_node.index, node_index);
2320 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002321 ASSERT (i == ti->next_index);
2322
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002323 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002324 ASSERT (i == ti->next_index);
2325
2326 // Add the L3 node for this ethertype to the next nodes structure
2327 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2328
2329 // Call the registration functions for other nodes that want a mapping
2330 l2bvi_register_input_type (vm, type, node_index);
2331}
2332
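/*
 * Register the L2 input node (e.g. l2-input); its next index is cached
 * in em->l2_next and used directly by the untagged L2 fast path in
 * ethernet_input_inline () above.
 */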
2333void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002334ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002335{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002336 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002337 u32 i;
2338
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002339 em->l2_next =
2340 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002341
Damjan Marion607de1a2016-08-16 22:53:54 +02002342 /*
Ed Warnickecb9cada2015-12-08 15:45:58 -07002343 * Even if we never use these arcs, we have to align the next indices...
2344 */
2345 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2346
2347 ASSERT (i == em->l2_next);
2348
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002349 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002350 ASSERT (i == em->l2_next);
2351}
2352
2353// Register a next node for L3 redirect, and enable L3 redirect
2354void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002355ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002356{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002357 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002358 u32 i;
2359
2360 em->redirect_l3 = 1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002361 em->redirect_l3_next = vlib_node_add_next (vm,
2362 ethernet_input_node.index,
2363 node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002364 /*
2365 * Change the cached next nodes to the redirect node
2366 */
2367 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2368 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2369 em->l3_next.input_next_mpls = em->redirect_l3_next;
2370
2371 /*
2372 * Even if we never use these arcs, we have to align the next indices...
2373 */
2374 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2375
2376 ASSERT (i == em->redirect_l3_next);
jerryianff82ed62016-12-05 17:13:00 +08002377
2378 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2379
2380 ASSERT (i == em->redirect_l3_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002381}
Damjan Marion5beecec2018-09-10 13:09:21 +02002382#endif
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002383
2384/*
2385 * fd.io coding-style-patch-verification: ON
2386 *
2387 * Local Variables:
2388 * eval: (c-set-style "gnu")
2389 * End:
2390 */