blob: 73e9eec71ae3849619c4d3b9e7514e64412450b9 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
Damjan Marion650223c2018-11-14 16:55:53 +01002 * Copyright (c) 2018 Cisco and/or its affiliates.
Ed Warnickecb9cada2015-12-08 15:45:58 -07003 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * ethernet_node.c: ethernet packet processing
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vlib/vlib.h>
41#include <vnet/pg/pg.h>
42#include <vnet/ethernet/ethernet.h>
Pavel Kotucek15ac81c2017-06-20 14:00:26 +020043#include <vnet/ethernet/p2p_ethernet.h>
Neale Ranns17ff3c12018-07-04 10:24:24 -070044#include <vnet/devices/pipe/pipe.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045#include <vppinfra/sparse_vec.h>
46#include <vnet/l2/l2_bvi.h>
Dave Barach9137e542019-09-13 17:47:50 -040047#include <vnet/classify/trace_classify.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070048
/* Disposition targets for ethernet-input: punt/drop for errors,
   llc-input for 802.3 LLC frames, and the two ip4 feeds (with and
   without checksum verification already done by hardware). */
#define foreach_ethernet_input_next \
  _ (PUNT, "error-punt") \
  _ (DROP, "error-drop") \
  _ (LLC, "llc-input") \
  _ (IP4_INPUT, "ip4-input") \
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;
63
typedef struct
{
  /* first 32 bytes of the packet, captured for display by the tracer */
  u8 packet_data[32];
  /* ETH_INPUT_FRAME_F_* flags copied from the incoming vlib frame */
  u16 frame_flags;
  /* per-frame hw/sw interface data; meaningful when frame_flags is set */
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;
70
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070071static u8 *
72format_ethernet_input_trace (u8 * s, va_list * va)
Ed Warnickecb9cada2015-12-08 15:45:58 -070073{
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070076 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
Damjan Marion650223c2018-11-14 16:55:53 +010077 u32 indent = format_get_indent (s);
Ed Warnickecb9cada2015-12-08 15:45:58 -070078
Damjan Marion650223c2018-11-14 16:55:53 +010079 if (t->frame_flags)
80 {
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
86 }
Ed Warnickecb9cada2015-12-08 15:45:58 -070087 s = format (s, "%U", format_ethernet_header, t->packet_data);
88
89 return s;
90}
91
Damjan Marione849da22018-09-12 13:32:01 +020092extern vlib_node_registration_t ethernet_input_node;
Ed Warnickecb9cada2015-12-08 15:45:58 -070093
typedef enum
{
  /* packet begins with a full ethernet header at current_data */
  ETHERNET_INPUT_VARIANT_ETHERNET,
  /* a prior node (LLC/SNAP handling) already consumed the header;
     only a 2-byte ethertype remains at current_data */
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  /* NOTE(review): presumably used when the interface is not in L2 mode;
     parse_header treats it like the plain ETHERNET variant — confirm */
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;
100
101
// Parse the ethernet header to extract vlan tags and innermost ethertype.
// Advances b0 past the ethernet header and every vlan tag it consumes,
// records the l2 header offset, and reports:
//   *type        - innermost ethertype (host byte order)
//   *orig_type   - outermost ethertype, for dot1q vs dot1ad distinction
//   *outer_id/*inner_id - vlan ids (0 when the tag is absent)
//   *match_flags - SUBINT_CONFIG_* flags describing how many tags matched
static_always_inline void
parse_header (ethernet_input_variant_t variant,
	      vlib_buffer_t * b0,
	      u16 * type,
	      u16 * orig_type,
	      u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = vlib_buffer_get_current (b0);

      /* remember where the l2 header starts before advancing past it */
      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing; only the 2-byte
      // ethertype remains in the buffer
      u16 *e0;

      e0 = vlib_buffer_get_current (b0);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = vlib_buffer_get_current (b0);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      /* priority-tagged (vlan id 0) does not count as a 1-tag match */
      if (0 == *outer_id)
	*match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
	{
	  // Double tagged packet
	  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

	  h0 = vlib_buffer_get_current (b0);

	  tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

	  *inner_id = tag & 0xfff;

	  *type = clib_net_to_host_u16 (h0->type);

	  vlib_buffer_advance (b0, sizeof (h0[0]));
	  vlan_count = 2;
	  if (*type == ETHERNET_TYPE_VLAN)
	    {
	      // More than double tagged packet
	      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

	      vlib_buffer_advance (b0, sizeof (h0[0]));
	      vlan_count = 3;	// "unknown" number, aka, 3-or-more
	    }
	}
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}
196
Matthew Smith42bde452019-11-18 09:35:24 -0600197static_always_inline void
198ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
199 u64 * dmacs, u8 * dmacs_bad,
200 u32 n_packets, ethernet_interface_t * ei,
201 u8 have_sec_dmac);
202
Ed Warnickecb9cada2015-12-08 15:45:58 -0700203// Determine the subinterface for this packet, given the result of the
204// vlan table lookups and vlan header parsing. Check the most specific
205// matches first.
206static_always_inline void
Ivan Shvedunov72869432020-10-15 13:19:35 +0300207identify_subint (ethernet_main_t * em,
208 vnet_hw_interface_t * hi,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700209 vlib_buffer_t * b0,
210 u32 match_flags,
211 main_intf_t * main_intf,
212 vlan_intf_t * vlan_intf,
213 qinq_intf_t * qinq_intf,
214 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700215{
216 u32 matched;
Ivan Shvedunov72869432020-10-15 13:19:35 +0300217 ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700218
Damjan Marionddf6e082018-11-26 16:05:07 +0100219 matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
220 qinq_intf, new_sw_if_index, error0, is_l2);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700221
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700222 if (matched)
223 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700224 // Perform L3 my-mac filter
John Lo4a302ee2020-05-12 22:34:39 -0400225 // A unicast packet arriving on an L3 interface must have a dmac
226 // matching the interface mac. If interface has STATUS_L3 bit set
227 // mac filter is already done.
Ivan Shvedunov72869432020-10-15 13:19:35 +0300228 if (!(*is_l2 || (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700229 {
Matthew Smith42bde452019-11-18 09:35:24 -0600230 u64 dmacs[2];
231 u8 dmacs_bad[2];
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700232 ethernet_header_t *e0;
Matthew Smith42bde452019-11-18 09:35:24 -0600233 ethernet_interface_t *ei0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700234
Matthew Smith42bde452019-11-18 09:35:24 -0600235 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
236 dmacs[0] = *(u64 *) e0;
237 ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);
238
239 if (ei0 && vec_len (ei0->secondary_addrs))
240 ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
241 1 /* n_packets */ , ei0,
242 1 /* have_sec_dmac */ );
243 else
244 ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
245 1 /* n_packets */ , ei0,
246 0 /* have_sec_dmac */ );
Matthew Smith42bde452019-11-18 09:35:24 -0600247 if (dmacs_bad[0])
248 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700249 }
250
251 // Check for down subinterface
252 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700253 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700254}
255
// Choose the next node (and possibly an error) for one packet based on
// the error state accumulated so far, the L2/L3 mode of the matched
// (sub)interface, and the innermost ethertype. Also records the l3
// header offset (current_data now points at the payload).
static_always_inline void
determine_next_node (ethernet_main_t * em,
		     ethernet_input_variant_t variant,
		     u32 is_l20,
		     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
	      ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;

    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
	i0 ==
	SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // If variant is variant_ethernet_type then we came from LLC
      // processing. Don't go back there; drop instead by keeping the
      // drop/bad table result.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
	{
	  *next0 = ETHERNET_INPUT_NEXT_LLC;
	}
    }
}
320
Damjan Marion650223c2018-11-14 16:55:53 +0100321
322/* following vector code relies on following assumptions */
323STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
324STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
325STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
326STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
327 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
328 "l3_hdr_offset must follow l2_hdr_offset");
329
/* Update metadata for 4 untagged buffers at once: record l2/l3 header
   offsets, set the offset-valid flags, and either advance past the
   ethernet header (is_l3) or record l2_len (l2 mode). The AVX2 path
   packs the first 8 bytes of each buffer's metadata (current_data,
   current_length, flags) into one 256-bit register. */
static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  u64x4 r, radv;

  /* vector of signed 16 bit integers used in signed vector add operation
     to advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  /* scalar fallback: same metadata updates, one buffer at a time */
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}
431
432static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100433eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
Damjan Marion650223c2018-11-14 16:55:53 +0100434{
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100435 i16 adv = sizeof (ethernet_header_t);
436 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
437 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
438
Damjan Marion650223c2018-11-14 16:55:53 +0100439 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
440 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
441
442 if (is_l3)
443 vlib_buffer_advance (b[0], adv);
444 b[0]->flags |= flags;
445 if (!is_l3)
446 vnet_buffer (b[0])->l2.l2_len = adv;
447}
448
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100449
/* Extract the ethertype and the 8 bytes following the ethernet header
   (potential vlan tag words) of buffer b[offset] into the etype/tags
   arrays; optionally capture the first 8 header bytes (dmac + 2 bytes
   of smac) into dmacs for later dmac filtering. Values are stored in
   network byte order. */
static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
			      u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  /* one unaligned 16-byte load starting 6 bytes before the type field
     covers header bytes 6..21: lane [3] of the u16 view is the type,
     the second u64 lane is the 8 bytes right after the 14-byte header */
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}
Damjan Marion650223c2018-11-14 16:55:53 +0100468
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100469static_always_inline u16
470eth_input_next_by_type (u16 etype)
471{
472 ethernet_main_t *em = &ethernet_main;
473
474 return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
475 vec_elt (em->l3_next.input_next_by_type,
476 sparse_vec_index (em->l3_next.input_next_by_type, etype));
477}
478
/* Cached result of the most recent tag lookup; reused while consecutive
   packets carry the same (masked) tag words, and flushed into the
   interface counters when the subinterface changes. */
typedef struct
{
  u64 tag, mask;		/* last tag data; mask of significant bits */
  u32 sw_if_index;		/* matched (sub)interface, ~0 if none */
  u16 type, len, next;		/* inner ethertype, l2 hdr len, next node */
  i16 adv;			/* buffer advance applied per packet */
  u8 err, n_tags;		/* ETHERNET_ERROR_*, number of vlan tags */
  u64 n_packets, n_bytes;	/* rx counters pending for sw_if_index */
} eth_input_tag_lookup_t;
488
489static_always_inline void
490eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
491 eth_input_tag_lookup_t * l)
492{
493 if (l->n_packets == 0 || l->sw_if_index == ~0)
494 return;
495
496 if (l->adv > 0)
497 l->n_bytes += l->n_packets * l->len;
498
499 vlib_increment_combined_counter
500 (vnm->interface_main.combined_sw_if_counters +
501 VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
502 l->n_packets, l->n_bytes);
503}
504
/* Slow path for tagged packets: resolve the vlan/qinq subinterface for
   one buffer, then apply the cached result (next node, advance, error)
   to the buffer. The lookup itself is only re-done when the packet's
   tag words differ from the cached ones under the cached mask; runs of
   identically-tagged packets hit the cache. */
static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
		      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
		      u64 tag, u16 * next, vlib_buffer_t * b,
		      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
		      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      /* cache miss: parse the tag words and redo the subint lookup */
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
				     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
	{
	  /* second tag present: inner ethertype is after it */
	  l->type = clib_net_to_host_u16 (t[3]);
	  l->n_tags = 2;
	  matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
					 qif, &new_sw_if_index, &l->err,
					 &is_l2);
	}
      else
	{
	  l->n_tags = 1;
	  if (vlan1 == 0)
	    {
	      /* priority tag only: treat as the main interface */
	      new_sw_if_index = hi->sw_if_index;
	      l->err = ETHERNET_ERROR_NONE;
	      matched = 1;
	      is_l2 = main_is_l3 == 0;
	    }
	  else
	    matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					   SUBINT_CONFIG_MATCH_1_TAG, mif,
					   vif, qif, &new_sw_if_index,
					   &l->err, &is_l2);
	}

      if (l->sw_if_index != new_sw_if_index)
	{
	  /* subinterface changed: flush counters accumulated so far */
	  eth_input_update_if_counters (vm, vnm, l);
	  l->n_packets = 0;
	  l->n_bytes = 0;
	  l->sw_if_index = new_sw_if_index;
	}
      l->tag = tag;
      /* with one tag, only the first 4 (network-order) bytes matter */
      l->mask = (l->n_tags == 2) ?
	clib_net_to_host_u64 (0xffffffffffffffff) :
	clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
	l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
	l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
	l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
	  l->n_tags * sizeof (ethernet_vlan_header_t);
      else
	l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
	l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
	l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
	l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
	l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
	l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
	l->next = em->redirect_l3_next;
      else
	{
	  l->next = eth_input_next_by_type (l->type);
	  if (l->next == ETHERNET_INPUT_NEXT_PUNT)
	    l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
	}
    }

  /* dmac filter only applies when the packet heads to L3 (adv > 0) */
  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}
627
Matthew G Smithd459bf32019-09-04 15:01:04 -0500628#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
629#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
630
#ifdef CLIB_HAVE_VEC256
/* Vector check of 4 destination macs against the interface mac.
   A dmac is "bad" when it differs from hwaddr AND its group (I/G) bit
   is clear — multicast/broadcast frames are never rejected here.
   Returns a 32-bit mask with 8 bits per packet (all-ones = bad). */
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif
640
Matthew G Smithd459bf32019-09-04 15:01:04 -0500641static_always_inline u8
642is_dmac_bad (u64 dmac, u64 hwaddr)
643{
644 u64 r0 = dmac & DMAC_MASK;
645 return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
646}
Matthew G Smithd459bf32019-09-04 15:01:04 -0500647
648static_always_inline u8
649is_sec_dmac_bad (u64 dmac, u64 hwaddr)
650{
651 return ((dmac & DMAC_MASK) != hwaddr);
652}
653
#ifdef CLIB_HAVE_VEC256
/* Vector form of is_sec_dmac_bad for 4 dmacs: bad = masked dmac does
   not equal the secondary address. Returns 8 mask bits per packet. */
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif
663
664static_always_inline u8
665eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
666{
667 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
668 return dmac_bad[0];
669}
670
/* Fold one secondary-address comparison into 4 per-packet bad flags at
   once; returns the 4 flags read back as a single u32 (nonzero while
   any of the 4 packets is still bad). */
static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}
684
Matthew Smith42bde452019-11-18 09:35:24 -0600685/*
686 * DMAC check for ethernet_input_inline()
687 *
688 * dmacs and dmacs_bad are arrays that are 2 elements long
689 * n_packets should be 1 or 2 for ethernet_input_inline()
690 */
691static_always_inline void
692ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
693 u64 * dmacs, u8 * dmacs_bad,
694 u32 n_packets, ethernet_interface_t * ei,
695 u8 have_sec_dmac)
696{
697 u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
698 u8 bad = 0;
699
700 dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
701 dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));
702
703 bad = dmacs_bad[0] | dmacs_bad[1];
704
705 if (PREDICT_FALSE (bad && have_sec_dmac))
706 {
707 mac_address_t *sec_addr;
708
709 vec_foreach (sec_addr, ei->secondary_addrs)
710 {
711 hwaddr = (*(u64 *) sec_addr) & DMAC_MASK;
712
713 bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
714 eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
715 dmacs_bad + 1));
716
717 if (!bad)
718 return;
719 }
720 }
721}
722
/* Frame-wide dmac filter: mark each of n_packets entries of dmacs_bad
   according to whether the dmac matches the interface mac, then — when
   have_sec_dmac and anything failed — clear flags for packets matching
   any secondary address. Arrays are VLIB_FRAME_SIZE long (a multiple
   of 8 per the static assert above), so the 8-at-a-time vector loop
   may read past n_packets but stays inside the arrays. */
static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
				    u64 * dmacs, u8 * dmacs_bad,
				    u32 n_packets, ethernet_interface_t * ei,
				    u8 have_sec_dmac)
{
  u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      /* 8 packets per iteration, 4 per vector compare */
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      /* re-scan failed packets against each secondary address in turn */
      mac_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
      {
	u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
	i32 n_left = n_packets;
	u64 *dmac = dmacs;
	u8 *dmac_bad = dmacs_bad;

	bad = 0;

	while (n_left > 0)
	  {
	    int adv = 0;
	    int n_bad;

	    /* skip any that have already matched */
	    if (!dmac_bad[0])
	      {
		dmac += 1;
		dmac_bad += 1;
		n_left -= 1;
		continue;
	      }

	    n_bad = clib_min (4, n_left);

	    /* If >= 4 left, compare 4 together */
	    if (n_bad == 4)
	      {
		bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
		adv = 4;
		n_bad = 0;
	      }

	    /* handle individually */
	    while (n_bad > 0)
	      {
		bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
						    dmac_bad + adv);
		adv += 1;
		n_bad -= 1;
	      }

	    dmac += adv;
	    dmac_bad += adv;
	    n_left -= adv;
	  }

	if (!bad)		/* can stop looping if everything matched */
	  break;
      }
    }
}
817
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100818/* process frame of buffers, store ethertype into array and update
819 buffer metadata fields depending on interface being l2 or l3 assuming that
820 packets are untagged. For tagged packets those fields are updated later.
   Optionally store Destination MAC address and tag data into arrays
822 for further processing */
823
824STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
825 "VLIB_FRAME_SIZE must be power of 8");
/* Process one frame of packets received on a single interface and enqueue
 * each to its next node.  Two passes over the frame:
 *
 *   pass 1 - extract per-packet data into flat arrays (ethertype into
 *            etypes[], and when dmac_check is set, tag words into tags[]
 *            and destination MAC into dmacs[]) while advancing buffers and
 *            setting l2/l3 metadata flags;
 *   pass 2 - classify each packet from etypes[] into nexts[].  In l3 mode
 *            untagged ip4/ip6/mpls are fastpath; in l2 mode every untagged
 *            packet is fastpath.  Everything else is collected into
 *            slowpath_indices[] and resolved individually.
 *
 * @param buffer_indices  array of n_packets buffer indices
 * @param main_is_l3      non-zero when the untagged (main) interface is L3
 * @param ip4_cksum_ok    non-zero when the driver verified ip4 checksums
 * @param dmac_check      non-zero when destination MAC filtering is needed
 */
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_hw_interface_t * hi,
			 u32 * buffer_indices, u32 n_packets, int main_is_l3,
			 int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  /* well-known ethertypes pre-swapped to network byte order so they can be
     compared directly against the on-wire type field */
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  /* pass 1, main loop: 4 packets per iteration, prefetching buffer headers
     16 packets ahead and packet data 8 packets ahead; the >= 20 bound keeps
     both prefetch windows inside the frame */
  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  /* pass 1, tail: same work without prefetch once fewer than 20 remain */
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  if (dmac_check)
    {
      /* dispatch to a variant specialized on whether secondary MAC
         addresses are configured, keeping the common single-address
         case lean */
      if (ei && vec_len (ei->secondary_addrs))
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 1 /* have_sec_dmac */ );
      else
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  /* driver already verified the ip4 checksum - use the no-checksum
     variant of the ip4 input node */
  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  /* broadcast match constants and next indices so 16 ethertypes can be
     classified per SIMD iteration */
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered as slowpath, in l2 mode all untagged packets are
     considered as fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
	{
	  /* each lane of r ends up holding the next index for a matched
	     ethertype, or 0 as the slowpath marker */
	  u16x16 r = zero;
	  u16x16 e16 = u16x16_load_unaligned (etype);
	  if (main_is_l3)
	    {
	      r += (e16 == et16_ip4) & next16_ip4;
	      r += (e16 == et16_ip6) & next16_ip6;
	      r += (e16 == et16_mpls) & next16_mpls;
	    }
	  else
	    r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
	  u16x16_store_unaligned (r, next);

	  /* any zero lane means at least one slowpath packet in this group */
	  if (!u16x16_is_all_zero (r == zero))
	    {
	      if (u16x16_is_all_zero (r))
		{
		  /* all 16 are slowpath - record indices i..i+15 at once */
		  u16x16_store_unaligned (u16x16_splat (i) + stairs,
					  slowpath_indices + n_slowpath);
		  n_slowpath += 16;
		}
	      else
		{
		  for (int j = 0; j < 16; j++)
		    if (next[j] == 0)
		      slowpath_indices[n_slowpath++] = i + j;
		}
	    }

	  etype += 16;
	  next += 16;
	  n_left -= 16;
	  i += 16;
	  continue;
	}
#endif
      /* scalar classification, mirroring the SIMD logic above */
      if (main_is_l3 && etype[0] == et_ip4)
	next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
	next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
	next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
	       etype[0] != et_vlan && etype[0] != et_dot1ad)
	next[0] = next_l2;
      else
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      /* one-entry lookup caches for dot1q and dot1ad tags; the cached tag
         is seeded with the first slowpath packet's tag inverted, so the
         first lookup is guaranteed to miss and populate the cache */
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
	.mask = -1LL,
	.tag = tags[si[0]] ^ -1LL,
	.sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
	{
	  i = si[0];
	  u16 etype = etypes[i];

	  if (etype == et_vlan)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1q_lookup, dmacs_bad[i], 0,
				    main_is_l3, dmac_check);

	    }
	  else if (etype == et_dot1ad)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1ad_lookup, dmacs_bad[i], 1,
				    main_is_l3, dmac_check);
	    }
	  else
	    {
	      /* untagged packet with not well known ethertype; cache the
	         last resolved ethertype -> next mapping since unknown
	         etypes tend to repeat within a frame */
	      if (last_unknown_etype != etype)
		{
		  last_unknown_etype = etype;
		  etype = clib_host_to_net_u16 (etype);
		  last_unknown_next = eth_input_next_by_type (etype);
		}
	      /* in l3 mode a packet that failed the DMAC check is punted */
	      if (dmac_check && main_is_l3 && dmacs_bad[i])
		{
		  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
		  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
		  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
		}
	      else
		nexts[i] = last_unknown_next;
	    }

	  /* next */
	  n_left--;
	  si++;
	}

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}
1077
1078static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001079eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
1080 vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
1081 int ip4_cksum_ok)
Damjan Marion650223c2018-11-14 16:55:53 +01001082{
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001083 ethernet_main_t *em = &ethernet_main;
1084 ethernet_interface_t *ei;
1085 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1086 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1087 subint_config_t *subint0 = &intf0->untagged_subint;
Damjan Marion650223c2018-11-14 16:55:53 +01001088
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001089 int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
John Lo4a302ee2020-05-12 22:34:39 -04001090 int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;
Damjan Marion650223c2018-11-14 16:55:53 +01001091
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001092 if (main_is_l3)
Damjan Marion650223c2018-11-14 16:55:53 +01001093 {
John Lo4a302ee2020-05-12 22:34:39 -04001094 if (int_is_l3 || /* DMAC filter already done by NIC */
1095 ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
1096 { /* All L2 usage - DMAC check not needed */
1097 eth_input_process_frame (vm, node, hi, from, n_pkts,
1098 /*is_l3 */ 1, ip4_cksum_ok, 0);
1099 }
Damjan Marion650223c2018-11-14 16:55:53 +01001100 else
John Lo4a302ee2020-05-12 22:34:39 -04001101 { /* DMAC check needed for L3 */
1102 eth_input_process_frame (vm, node, hi, from, n_pkts,
1103 /*is_l3 */ 1, ip4_cksum_ok, 1);
1104 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001105 return;
Damjan Marion650223c2018-11-14 16:55:53 +01001106 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001107 else
Damjan Marion650223c2018-11-14 16:55:53 +01001108 {
John Lo4a302ee2020-05-12 22:34:39 -04001109 if (hi->l3_if_count == 0)
1110 { /* All L2 usage - DMAC check not needed */
1111 eth_input_process_frame (vm, node, hi, from, n_pkts,
1112 /*is_l3 */ 0, ip4_cksum_ok, 0);
1113 }
1114 else
1115 { /* DMAC check needed for L3 */
1116 eth_input_process_frame (vm, node, hi, from, n_pkts,
1117 /*is_l3 */ 0, ip4_cksum_ok, 1);
1118 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001119 return;
Damjan Marion650223c2018-11-14 16:55:53 +01001120 }
1121}
1122
/* Frame pre-processing shared by the ethernet-input variants: copy packet
 * and frame metadata into the vlib trace buffer for traced packets, and
 * feed packets to the rx pcap capture when it is enabled. */
static_always_inline void
ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * from_frame)
{
  u32 *from, n_left;
  /* packet tracing - only walk the frame when tracing is on for this node */
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;

      while (n_left)
	{
	  ethernet_input_trace_t *t0;
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);

	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b0,
				   sizeof (ethernet_input_trace_t));
	      /* snapshot the start of packet data plus the frame's flags
	         and scalar args into the trace record */
	      clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
				sizeof (t0->packet_data));
	      t0->frame_flags = from_frame->flags;
	      clib_memcpy_fast (&t0->frame_data,
				vlib_frame_scalar_args (from_frame),
				sizeof (ethernet_input_frame_t));
	    }
	  from += 1;
	  n_left -= 1;
	}
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
    {
      u32 bi0;
      vnet_pcap_t *pp = &vlib_global_main.pcap;

      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;
      while (n_left > 0)
	{
	  int classify_filter_result;
	  vlib_buffer_t *b0;
	  bi0 = from[0];
	  from++;
	  n_left--;
	  b0 = vlib_get_buffer (vm, bi0);
	  /* a configured global classify filter takes precedence over the
	     per-interface capture logic below (note the continue) */
	  if (pp->filter_classify_table_index != ~0)
	    {
	      classify_filter_result =
		vnet_is_packet_traced_inline
		(b0, pp->filter_classify_table_index, 0 /* full classify */ );
	      if (classify_filter_result)
		pcap_add_buffer (&pp->pcap_main, vm, bi0,
				 pp->max_bytes_per_pkt);
	      continue;
	    }

	  /* pcap_sw_if_index 0 acts as a wildcard: capture regardless of
	     the packet's rx interface */
	  if (pp->pcap_sw_if_index == 0 ||
	      pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
	    {
	      vnet_main_t *vnm = vnet_get_main ();
	      vnet_hw_interface_t *hi =
		vnet_get_sup_hw_interface
		(vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);

	      /* Capture pkt if not filtered, or if filter hits */
	      if (hi->trace_classify_table_index == ~0 ||
		  vnet_is_packet_traced_inline
		  (b0, hi->trace_classify_table_index,
		   0 /* full classify */ ))
		pcap_add_buffer (&pp->pcap_main, vm, bi0,
				 pp->max_bytes_per_pkt);
	    }
	}
    }
}
1200
1201static_always_inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001202ethernet_input_inline (vlib_main_t * vm,
1203 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001204 u32 * from, u32 n_packets,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001205 ethernet_input_variant_t variant)
1206{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001207 vnet_main_t *vnm = vnet_get_main ();
1208 ethernet_main_t *em = &ethernet_main;
1209 vlib_node_runtime_t *error_node;
Damjan Marion650223c2018-11-14 16:55:53 +01001210 u32 n_left_from, next_index, *to_next;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001211 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
Damjan Marion067cd622018-07-11 12:47:43 +02001212 u32 thread_index = vm->thread_index;
Dave Barachcfba1e22016-11-16 10:23:50 -05001213 u32 cached_sw_if_index = ~0;
1214 u32 cached_is_l2 = 0; /* shut up gcc */
John Lo1904c472017-03-10 17:15:22 -05001215 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
Matthew Smith42bde452019-11-18 09:35:24 -06001216 ethernet_interface_t *ei = NULL;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001217 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
1218 vlib_buffer_t **b = bufs;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001219
1220 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
1221 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
1222 else
1223 error_node = node;
1224
Damjan Marion650223c2018-11-14 16:55:53 +01001225 n_left_from = n_packets;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001226
1227 next_index = node->cached_next_index;
1228 stats_sw_if_index = node->runtime_data[0];
1229 stats_n_packets = stats_n_bytes = 0;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001230 vlib_get_buffers (vm, from, bufs, n_left_from);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001231
1232 while (n_left_from > 0)
1233 {
1234 u32 n_left_to_next;
1235
1236 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1237
1238 while (n_left_from >= 4 && n_left_to_next >= 2)
1239 {
1240 u32 bi0, bi1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001241 vlib_buffer_t *b0, *b1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001242 u8 next0, next1, error0, error1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001243 u16 type0, orig_type0, type1, orig_type1;
1244 u16 outer_id0, inner_id0, outer_id1, inner_id1;
1245 u32 match_flags0, match_flags1;
1246 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
1247 new_sw_if_index1, len1;
1248 vnet_hw_interface_t *hi0, *hi1;
1249 main_intf_t *main_intf0, *main_intf1;
1250 vlan_intf_t *vlan_intf0, *vlan_intf1;
1251 qinq_intf_t *qinq_intf0, *qinq_intf1;
1252 u32 is_l20, is_l21;
Dave Barachcfba1e22016-11-16 10:23:50 -05001253 ethernet_header_t *e0, *e1;
Matthew Smith42bde452019-11-18 09:35:24 -06001254 u64 dmacs[2];
1255 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001256
1257 /* Prefetch next iteration. */
1258 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001259 vlib_prefetch_buffer_header (b[2], STORE);
1260 vlib_prefetch_buffer_header (b[3], STORE);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001261
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001262 CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
1263 CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001264 }
1265
1266 bi0 = from[0];
1267 bi1 = from[1];
1268 to_next[0] = bi0;
1269 to_next[1] = bi1;
1270 from += 2;
1271 to_next += 2;
1272 n_left_to_next -= 2;
1273 n_left_from -= 2;
1274
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001275 b0 = b[0];
1276 b1 = b[1];
1277 b += 2;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001278
1279 error0 = error1 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001280 e0 = vlib_buffer_get_current (b0);
1281 type0 = clib_net_to_host_u16 (e0->type);
1282 e1 = vlib_buffer_get_current (b1);
1283 type1 = clib_net_to_host_u16 (e1->type);
1284
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001285 /* Set the L2 header offset for all packets */
1286 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1287 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
1288 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1289 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1290
John Locc532852016-12-14 15:42:45 -05001291 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001292 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
Damjan Marionc6969b52018-02-19 12:14:06 +01001293 && !ethernet_frame_is_any_tagged_x2 (type0,
1294 type1)))
Dave Barachcfba1e22016-11-16 10:23:50 -05001295 {
1296 main_intf_t *intf0;
1297 subint_config_t *subint0;
1298 u32 sw_if_index0, sw_if_index1;
1299
1300 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1301 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1302 is_l20 = cached_is_l2;
1303
1304 /* This is probably wholly unnecessary */
1305 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
1306 goto slowpath;
1307
John Lo1904c472017-03-10 17:15:22 -05001308 /* Now sw_if_index0 == sw_if_index1 */
Dave Barachcfba1e22016-11-16 10:23:50 -05001309 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1310 {
1311 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001312 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001313 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001314 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001315 subint0 = &intf0->untagged_subint;
1316 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1317 }
John Lo7714b302016-12-20 16:59:02 -05001318
Dave Barachcfba1e22016-11-16 10:23:50 -05001319 if (PREDICT_TRUE (is_l20 != 0))
1320 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001321 vnet_buffer (b0)->l3_hdr_offset =
1322 vnet_buffer (b0)->l2_hdr_offset +
1323 sizeof (ethernet_header_t);
1324 vnet_buffer (b1)->l3_hdr_offset =
1325 vnet_buffer (b1)->l2_hdr_offset +
1326 sizeof (ethernet_header_t);
1327 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1328 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001329 next0 = em->l2_next;
1330 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001331 next1 = em->l2_next;
1332 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001333 }
John Locc532852016-12-14 15:42:45 -05001334 else
1335 {
Ivan Shvedunov72869432020-10-15 13:19:35 +03001336 if (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
John Lo4a302ee2020-05-12 22:34:39 -04001337 goto skip_dmac_check01;
1338
Matthew Smith42bde452019-11-18 09:35:24 -06001339 dmacs[0] = *(u64 *) e0;
1340 dmacs[1] = *(u64 *) e1;
1341
1342 if (ei && vec_len (ei->secondary_addrs))
1343 ethernet_input_inline_dmac_check (hi, dmacs,
1344 dmacs_bad,
1345 2 /* n_packets */ ,
1346 ei,
1347 1 /* have_sec_dmac */ );
1348 else
1349 ethernet_input_inline_dmac_check (hi, dmacs,
1350 dmacs_bad,
1351 2 /* n_packets */ ,
1352 ei,
1353 0 /* have_sec_dmac */ );
1354
1355 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001356 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001357 if (dmacs_bad[1])
John Lo1904c472017-03-10 17:15:22 -05001358 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001359
John Lo4a302ee2020-05-12 22:34:39 -04001360 skip_dmac_check01:
John Lob14826e2018-04-18 15:52:23 -04001361 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001362 determine_next_node (em, variant, 0, type0, b0,
1363 &error0, &next0);
John Lob14826e2018-04-18 15:52:23 -04001364 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001365 determine_next_node (em, variant, 0, type1, b1,
1366 &error1, &next1);
John Locc532852016-12-14 15:42:45 -05001367 }
1368 goto ship_it01;
Dave Barachcfba1e22016-11-16 10:23:50 -05001369 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001370
John Locc532852016-12-14 15:42:45 -05001371 /* Slow-path for the tagged case */
1372 slowpath:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001373 parse_header (variant,
1374 b0,
1375 &type0,
1376 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001377
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001378 parse_header (variant,
1379 b1,
1380 &type1,
1381 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001382
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001383 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1384 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001385
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001386 eth_vlan_table_lookups (em,
1387 vnm,
1388 old_sw_if_index0,
1389 orig_type0,
1390 outer_id0,
1391 inner_id0,
1392 &hi0,
1393 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001394
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001395 eth_vlan_table_lookups (em,
1396 vnm,
1397 old_sw_if_index1,
1398 orig_type1,
1399 outer_id1,
1400 inner_id1,
1401 &hi1,
1402 &main_intf1, &vlan_intf1, &qinq_intf1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001403
Ivan Shvedunov72869432020-10-15 13:19:35 +03001404 identify_subint (em,
1405 hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001406 b0,
1407 match_flags0,
1408 main_intf0,
1409 vlan_intf0,
1410 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001411
Ivan Shvedunov72869432020-10-15 13:19:35 +03001412 identify_subint (em,
1413 hi1,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001414 b1,
1415 match_flags1,
1416 main_intf1,
1417 vlan_intf1,
1418 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001419
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001420 // Save RX sw_if_index for later nodes
1421 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1422 error0 !=
1423 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1424 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1425 error1 !=
1426 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001427
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001428 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
1429 if (((new_sw_if_index0 != ~0)
1430 && (new_sw_if_index0 != old_sw_if_index0))
1431 || ((new_sw_if_index1 != ~0)
1432 && (new_sw_if_index1 != old_sw_if_index1)))
1433 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001434
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001435 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001436 - vnet_buffer (b0)->l2_hdr_offset;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001437 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001438 - vnet_buffer (b1)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001439
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001440 stats_n_packets += 2;
1441 stats_n_bytes += len0 + len1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001442
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001443 if (PREDICT_FALSE
1444 (!(new_sw_if_index0 == stats_sw_if_index
1445 && new_sw_if_index1 == stats_sw_if_index)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001446 {
1447 stats_n_packets -= 2;
1448 stats_n_bytes -= len0 + len1;
1449
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001450 if (new_sw_if_index0 != old_sw_if_index0
1451 && new_sw_if_index0 != ~0)
1452 vlib_increment_combined_counter (vnm->
1453 interface_main.combined_sw_if_counters
1454 +
1455 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001456 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001457 new_sw_if_index0, 1,
1458 len0);
1459 if (new_sw_if_index1 != old_sw_if_index1
1460 && new_sw_if_index1 != ~0)
1461 vlib_increment_combined_counter (vnm->
1462 interface_main.combined_sw_if_counters
1463 +
1464 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001465 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001466 new_sw_if_index1, 1,
1467 len1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001468
1469 if (new_sw_if_index0 == new_sw_if_index1)
1470 {
1471 if (stats_n_packets > 0)
1472 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001473 vlib_increment_combined_counter
1474 (vnm->interface_main.combined_sw_if_counters
1475 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001476 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001477 stats_sw_if_index,
1478 stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001479 stats_n_packets = stats_n_bytes = 0;
1480 }
1481 stats_sw_if_index = new_sw_if_index0;
1482 }
1483 }
1484 }
1485
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001486 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1487 is_l20 = is_l21 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001488
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001489 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1490 &next0);
1491 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1492 &next1);
1493
John Lo1904c472017-03-10 17:15:22 -05001494 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001495 b0->error = error_node->errors[error0];
1496 b1->error = error_node->errors[error1];
1497
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001498 // verify speculative enqueue
1499 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1500 n_left_to_next, bi0, bi1, next0,
1501 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001502 }
1503
1504 while (n_left_from > 0 && n_left_to_next > 0)
1505 {
1506 u32 bi0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001507 vlib_buffer_t *b0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001508 u8 error0, next0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001509 u16 type0, orig_type0;
1510 u16 outer_id0, inner_id0;
1511 u32 match_flags0;
1512 u32 old_sw_if_index0, new_sw_if_index0, len0;
1513 vnet_hw_interface_t *hi0;
1514 main_intf_t *main_intf0;
1515 vlan_intf_t *vlan_intf0;
1516 qinq_intf_t *qinq_intf0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001517 ethernet_header_t *e0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001518 u32 is_l20;
Matthew Smith42bde452019-11-18 09:35:24 -06001519 u64 dmacs[2];
1520 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001521
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001522 // Prefetch next iteration
1523 if (n_left_from > 1)
1524 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001525 vlib_prefetch_buffer_header (b[1], STORE);
1526 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001527 }
1528
1529 bi0 = from[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001530 to_next[0] = bi0;
1531 from += 1;
1532 to_next += 1;
1533 n_left_from -= 1;
1534 n_left_to_next -= 1;
1535
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001536 b0 = b[0];
1537 b += 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001538
1539 error0 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001540 e0 = vlib_buffer_get_current (b0);
1541 type0 = clib_net_to_host_u16 (e0->type);
1542
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001543 /* Set the L2 header offset for all packets */
1544 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1545 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1546
John Locc532852016-12-14 15:42:45 -05001547 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001548 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1549 && !ethernet_frame_is_tagged (type0)))
1550 {
1551 main_intf_t *intf0;
1552 subint_config_t *subint0;
1553 u32 sw_if_index0;
1554
1555 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1556 is_l20 = cached_is_l2;
1557
1558 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1559 {
1560 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001561 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001562 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001563 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001564 subint0 = &intf0->untagged_subint;
1565 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1566 }
John Lo7714b302016-12-20 16:59:02 -05001567
John Lo7714b302016-12-20 16:59:02 -05001568
Dave Barachcfba1e22016-11-16 10:23:50 -05001569 if (PREDICT_TRUE (is_l20 != 0))
1570 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001571 vnet_buffer (b0)->l3_hdr_offset =
1572 vnet_buffer (b0)->l2_hdr_offset +
1573 sizeof (ethernet_header_t);
1574 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001575 next0 = em->l2_next;
1576 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001577 }
John Locc532852016-12-14 15:42:45 -05001578 else
1579 {
Ivan Shvedunov72869432020-10-15 13:19:35 +03001580 if (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
John Lo4a302ee2020-05-12 22:34:39 -04001581 goto skip_dmac_check0;
1582
Matthew Smith42bde452019-11-18 09:35:24 -06001583 dmacs[0] = *(u64 *) e0;
1584
1585 if (ei && vec_len (ei->secondary_addrs))
1586 ethernet_input_inline_dmac_check (hi, dmacs,
1587 dmacs_bad,
1588 1 /* n_packets */ ,
1589 ei,
1590 1 /* have_sec_dmac */ );
1591 else
1592 ethernet_input_inline_dmac_check (hi, dmacs,
1593 dmacs_bad,
1594 1 /* n_packets */ ,
1595 ei,
1596 0 /* have_sec_dmac */ );
1597
1598 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001599 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001600
John Lo4a302ee2020-05-12 22:34:39 -04001601 skip_dmac_check0:
Andrew Yourtchenkoe78bca12018-10-10 16:15:55 +02001602 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001603 determine_next_node (em, variant, 0, type0, b0,
1604 &error0, &next0);
John Locc532852016-12-14 15:42:45 -05001605 }
1606 goto ship_it0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001607 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001608
John Locc532852016-12-14 15:42:45 -05001609 /* Slow-path for the tagged case */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001610 parse_header (variant,
1611 b0,
1612 &type0,
1613 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001614
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001615 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001616
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001617 eth_vlan_table_lookups (em,
1618 vnm,
1619 old_sw_if_index0,
1620 orig_type0,
1621 outer_id0,
1622 inner_id0,
1623 &hi0,
1624 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001625
Ivan Shvedunov72869432020-10-15 13:19:35 +03001626 identify_subint (em,
1627 hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001628 b0,
1629 match_flags0,
1630 main_intf0,
1631 vlan_intf0,
1632 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001633
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001634 // Save RX sw_if_index for later nodes
1635 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1636 error0 !=
1637 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001638
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001639 // Increment subinterface stats
1640 // Note that interface-level counters have already been incremented
1641 // prior to calling this function. Thus only subinterface counters
1642 // are incremented here.
1643 //
Damjan Marion607de1a2016-08-16 22:53:54 +02001644 // Interface level counters include packets received on the main
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001645 // interface and all subinterfaces. Subinterface level counters
1646 // include only those packets received on that subinterface
Ed Warnickecb9cada2015-12-08 15:45:58 -07001647 // Increment stats if the subint is valid and it is not the main intf
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001648 if ((new_sw_if_index0 != ~0)
1649 && (new_sw_if_index0 != old_sw_if_index0))
1650 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001651
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001652 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001653 - vnet_buffer (b0)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001654
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001655 stats_n_packets += 1;
1656 stats_n_bytes += len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001657
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001658 // Batch stat increments from the same subinterface so counters
Damjan Marion607de1a2016-08-16 22:53:54 +02001659 // don't need to be incremented for every packet.
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001660 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1661 {
1662 stats_n_packets -= 1;
1663 stats_n_bytes -= len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001664
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001665 if (new_sw_if_index0 != ~0)
1666 vlib_increment_combined_counter
1667 (vnm->interface_main.combined_sw_if_counters
1668 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001669 thread_index, new_sw_if_index0, 1, len0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001670 if (stats_n_packets > 0)
1671 {
1672 vlib_increment_combined_counter
1673 (vnm->interface_main.combined_sw_if_counters
1674 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001675 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001676 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1677 stats_n_packets = stats_n_bytes = 0;
1678 }
1679 stats_sw_if_index = new_sw_if_index0;
1680 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001681 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001682
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001683 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1684 is_l20 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001685
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001686 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1687 &next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001688
John Lo1904c472017-03-10 17:15:22 -05001689 ship_it0:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001690 b0->error = error_node->errors[error0];
1691
1692 // verify speculative enqueue
Ed Warnickecb9cada2015-12-08 15:45:58 -07001693 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1694 to_next, n_left_to_next,
1695 bi0, next0);
1696 }
1697
1698 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1699 }
1700
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001701 // Increment any remaining batched stats
1702 if (stats_n_packets > 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001703 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001704 vlib_increment_combined_counter
1705 (vnm->interface_main.combined_sw_if_counters
1706 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001707 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001708 node->runtime_data[0] = stats_sw_if_index;
1709 }
Damjan Marion650223c2018-11-14 16:55:53 +01001710}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001711
Damjan Marion5beecec2018-09-10 13:09:21 +02001712VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1713 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001714 vlib_frame_t * frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001715{
Damjan Marion650223c2018-11-14 16:55:53 +01001716 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001717 u32 *from = vlib_frame_vector_args (frame);
1718 u32 n_packets = frame->n_vectors;
1719
1720 ethernet_input_trace (vm, node, frame);
1721
1722 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1723 {
Damjan Marion650223c2018-11-14 16:55:53 +01001724 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
Damjan Marion650223c2018-11-14 16:55:53 +01001725 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001726 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1727 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
Damjan Marion650223c2018-11-14 16:55:53 +01001728 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001729 else
1730 ethernet_input_inline (vm, node, from, n_packets,
1731 ETHERNET_INPUT_VARIANT_ETHERNET);
Damjan Marion650223c2018-11-14 16:55:53 +01001732 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001733}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001734
Damjan Marion5beecec2018-09-10 13:09:21 +02001735VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1736 vlib_node_runtime_t * node,
1737 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001738{
Damjan Marion650223c2018-11-14 16:55:53 +01001739 u32 *from = vlib_frame_vector_args (from_frame);
1740 u32 n_packets = from_frame->n_vectors;
1741 ethernet_input_trace (vm, node, from_frame);
1742 ethernet_input_inline (vm, node, from, n_packets,
1743 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1744 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001745}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001746
Damjan Marion5beecec2018-09-10 13:09:21 +02001747VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1748 vlib_node_runtime_t * node,
1749 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001750{
Damjan Marion650223c2018-11-14 16:55:53 +01001751 u32 *from = vlib_frame_vector_args (from_frame);
1752 u32 n_packets = from_frame->n_vectors;
1753 ethernet_input_trace (vm, node, from_frame);
1754 ethernet_input_inline (vm, node, from, n_packets,
1755 ETHERNET_INPUT_VARIANT_NOT_L2);
1756 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001757}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001758
1759
// Return the subinterface config struct for the given sw_if_index.
// Also return via parameter the appropriate match flags for the
// configured number of tags.
// On error (unsupported or not ethernet) return 0; *unsupported is set
// to 1 only for recognized-but-unimplemented configurations.
static subint_config_t *
ethernet_sw_interface_get_config (vnet_main_t * vnm,
				  u32 sw_if_index,
				  u32 * flags, u32 * unsupported)
{
  ethernet_main_t *em = &ethernet_main;
  vnet_hw_interface_t *hi;
  vnet_sw_interface_t *si;
  main_intf_t *main_intf;
  vlan_table_t *vlan_table;
  qinq_table_t *qinq_table;
  subint_config_t *subint = 0;

  hi = vnet_get_sup_hw_interface (vnm, sw_if_index);

  if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
    {
      *unsupported = 0;
      goto done;		// non-ethernet interface
    }

  // ensure there's an entry for the main intf (shouldn't really be necessary)
  vec_validate (em->main_intfs, hi->hw_if_index);
  main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);

  // Locate the subint for the given ethernet config
  si = vnet_get_sw_interface (vnm, sw_if_index);

  if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
    {
      // P2P subinterface: config lives in the p2p subif pool, keyed by
      // (hw_if_index, client mac).  Allocate on first lookup miss.
      p2p_ethernet_main_t *p2pm = &p2p_main;
      u32 p2pe_sw_if_index =
	p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
      if (p2pe_sw_if_index == ~0)
	{
	  pool_get (p2pm->p2p_subif_pool, subint);
	  // store the pool index, not the pointer: the pool may move on growth
	  si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
	}
      else
	subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
    {
      // Pipe interface: config is embedded in the pipe object itself
      pipe_t *pipe;

      pipe = pipe_get (sw_if_index);
      subint = &pipe->subint;
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->sub.eth.flags.default_sub)
    {
      // default subinterface: catches any tagged packet not matched by
      // a more specific subinterface
      subint = &main_intf->default_subint;
      *flags = SUBINT_CONFIG_MATCH_1_TAG |
	SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
    }
  else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
    {
      // if no flags are set then this is a main interface
      // so treat as untagged
      subint = &main_intf->untagged_subint;
      *flags = SUBINT_CONFIG_MATCH_0_TAG;
    }
  else
    {
      // one or two tags
      // first get the vlan table
      if (si->sub.eth.flags.dot1ad)
	{
	  if (main_intf->dot1ad_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      // main_intf keeps the pool index (pool may be reallocated)
	      main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
	    }
	}
      else
	{			// dot1q
	  if (main_intf->dot1q_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
	    }
	}

      if (si->sub.eth.flags.one_tag)
	{
	  // non-exact match also accepts extra inner tags
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_1_TAG :
	    (SUBINT_CONFIG_MATCH_1_TAG |
	     SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }
	  else
	    {
	      // a single vlan, a common case
	      subint =
		&vlan_table->vlans[si->sub.eth.
				   outer_vlan_id].single_tag_subint;
	    }

	}
      else
	{
	  // Two tags
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_2_TAG :
	    (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any
	      && si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }

	  if (si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // a specific outer and "any" inner
	      // don't need a qinq table for this
	      subint =
		&vlan_table->vlans[si->sub.eth.
				   outer_vlan_id].inner_any_subint;
	      if (si->sub.eth.flags.exact_match)
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG;
		}
	      else
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG |
		    SUBINT_CONFIG_MATCH_3_TAG;
		}
	    }
	  else
	    {
	      // a specific outer + specifc innner vlan id, a common case

	      // get the qinq table
	      if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
		{
		  // Allocate a qinq table from the pool
		  pool_get (em->qinq_pool, qinq_table);
		  vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
		    qinq_table - em->qinq_pool;
		}
	      else
		{
		  // Get ptr to existing qinq table
		  qinq_table =
		    vec_elt_at_index (em->qinq_pool,
				      vlan_table->vlans[si->sub.
							eth.outer_vlan_id].
				      qinqs);
		}
	      subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
	    }
	}
    }

done:
  return subint;
}
1945
Damjan Marion5beecec2018-09-10 13:09:21 +02001946static clib_error_t *
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001947ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001948{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001949 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001950 u32 placeholder_flags;
1951 u32 placeholder_unsup;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001952 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001953
1954 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001955 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001956 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1957 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001958
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001959 if (subint == 0)
1960 {
1961 // not implemented yet or not ethernet
1962 goto done;
1963 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001964
1965 subint->sw_if_index =
1966 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1967
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001968done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001969 return error;
1970}
1971
1972VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1973
1974
Damjan Marion5beecec2018-09-10 13:09:21 +02001975#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -07001976// Set the L2/L3 mode for the subinterface
1977void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001978ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001979{
1980 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001981 u32 placeholder_flags;
1982 u32 placeholder_unsup;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001983 int is_port;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001984 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001985
1986 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1987
1988 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001989 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001990 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1991 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001992
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001993 if (subint == 0)
1994 {
1995 // unimplemented or not ethernet
1996 goto done;
1997 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001998
1999 // Double check that the config we found is for our interface (or the interface is down)
2000 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2001
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002002 if (l2)
2003 {
2004 subint->flags |= SUBINT_CONFIG_L2;
2005 if (is_port)
2006 subint->flags |=
2007 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
2008 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
2009 }
2010 else
2011 {
2012 subint->flags &= ~SUBINT_CONFIG_L2;
2013 if (is_port)
2014 subint->flags &=
2015 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
2016 | SUBINT_CONFIG_MATCH_3_TAG);
2017 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002018
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002019done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002020 return;
2021}
2022
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002023/*
2024 * Set the L2/L3 mode for the subinterface regardless of port
2025 */
2026void
2027ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002028 u32 sw_if_index, u32 l2)
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002029{
2030 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04002031 u32 placeholder_flags;
2032 u32 placeholder_unsup;
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002033
2034 /* Find the config for this subinterface */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002035 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04002036 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
2037 &placeholder_unsup);
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002038
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002039 if (subint == 0)
2040 {
2041 /* unimplemented or not ethernet */
2042 goto done;
2043 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002044
2045 /*
2046 * Double check that the config we found is for our interface (or the
2047 * interface is down)
2048 */
2049 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2050
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002051 if (l2)
2052 {
2053 subint->flags |= SUBINT_CONFIG_L2;
2054 }
2055 else
2056 {
2057 subint->flags &= ~SUBINT_CONFIG_L2;
2058 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002059
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002060done:
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002061 return;
2062}
Damjan Marion5beecec2018-09-10 13:09:21 +02002063#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07002064
2065static clib_error_t *
2066ethernet_sw_interface_add_del (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002067 u32 sw_if_index, u32 is_create)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002068{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002069 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002070 subint_config_t *subint;
2071 u32 match_flags;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002072 u32 unsupported = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002073
2074 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002075 subint =
2076 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
2077 &unsupported);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002078
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002079 if (subint == 0)
2080 {
2081 // not implemented yet or not ethernet
2082 if (unsupported)
2083 {
Damjan Marion607de1a2016-08-16 22:53:54 +02002084 // this is the NYI case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002085 error = clib_error_return (0, "not implemented yet");
2086 }
2087 goto done;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002088 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002089
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002090 if (!is_create)
2091 {
2092 subint->flags = 0;
2093 return error;
2094 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002095
2096 // Initialize the subint
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002097 if (subint->flags & SUBINT_CONFIG_VALID)
2098 {
2099 // Error vlan already in use
2100 error = clib_error_return (0, "vlan is already in use");
2101 }
2102 else
2103 {
Neale Ranns17ff3c12018-07-04 10:24:24 -07002104 // Note that config is L3 by default
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002105 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2106 subint->sw_if_index = ~0; // because interfaces are initially down
2107 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002108
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002109done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002110 return error;
2111}
2112
2113VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2114
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002115static char *ethernet_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002116#define ethernet_error(n,c,s) s,
2117#include "error.def"
2118#undef ethernet_error
2119};
2120
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002121/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002122VLIB_REGISTER_NODE (ethernet_input_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002123 .name = "ethernet-input",
2124 /* Takes a vector of packets. */
2125 .vector_size = sizeof (u32),
Damjan Marion650223c2018-11-14 16:55:53 +01002126 .scalar_size = sizeof (ethernet_input_frame_t),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002127 .n_errors = ETHERNET_N_ERROR,
2128 .error_strings = ethernet_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002129 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2130 .next_nodes = {
2131#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2132 foreach_ethernet_input_next
2133#undef _
2134 },
Ed Warnickecb9cada2015-12-08 15:45:58 -07002135 .format_buffer = format_ethernet_header_with_length,
2136 .format_trace = format_ethernet_input_trace,
2137 .unformat_buffer = unformat_ethernet_header,
2138};
2139
/* Registration for the "ethernet-input-type" variant; shares the same
 * next-node table as ethernet-input but has no frame scalar args. */
VLIB_REGISTER_NODE (ethernet_input_type_node) = {
  .name = "ethernet-input-type",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};
2151
/* Registration for the "ethernet-input-not-l2" variant; shares the same
 * next-node table as ethernet-input. */
VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
  .name = "ethernet-input-not-l2",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002163/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002164
Damjan Marion5beecec2018-09-10 13:09:21 +02002165#ifndef CLIB_MARCH_VARIANT
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002166void
2167ethernet_set_rx_redirect (vnet_main_t * vnm,
2168 vnet_hw_interface_t * hi, u32 enable)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002169{
2170 // Insure all packets go to ethernet-input (i.e. untagged ipv4 packets
2171 // don't go directly to ip4-input)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002172 vnet_hw_interface_rx_redirect_to_node
2173 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002174}
2175
2176
2177/*
2178 * Initialization and registration for the next_by_ethernet structure
2179 */
2180
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002181clib_error_t *
2182next_by_ethertype_init (next_by_ethertype_t * l3_next)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002183{
2184 l3_next->input_next_by_type = sparse_vec_new
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002185 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002186 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2187
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002188 vec_validate (l3_next->sparse_index_by_input_next_index,
2189 ETHERNET_INPUT_NEXT_DROP);
2190 vec_validate (l3_next->sparse_index_by_input_next_index,
2191 ETHERNET_INPUT_NEXT_PUNT);
2192 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2193 SPARSE_VEC_INVALID_INDEX;
2194 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2195 SPARSE_VEC_INVALID_INDEX;
2196
Damjan Marion607de1a2016-08-16 22:53:54 +02002197 /*
2198 * Make sure we don't wipe out an ethernet registration by mistake
Dave Barach1f49ed62016-02-24 11:29:06 -05002199 * Can happen if init function ordering constraints are missing.
2200 */
2201 if (CLIB_DEBUG > 0)
2202 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002203 ethernet_main_t *em = &ethernet_main;
2204 ASSERT (em->next_by_ethertype_register_called == 0);
Dave Barach1f49ed62016-02-24 11:29:06 -05002205 }
2206
Ed Warnickecb9cada2015-12-08 15:45:58 -07002207 return 0;
2208}
2209
2210// Add an ethertype -> next index mapping to the structure
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002211clib_error_t *
2212next_by_ethertype_register (next_by_ethertype_t * l3_next,
2213 u32 ethertype, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002214{
2215 u32 i;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002216 u16 *n;
2217 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002218
Dave Barach1f49ed62016-02-24 11:29:06 -05002219 if (CLIB_DEBUG > 0)
2220 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002221 ethernet_main_t *em = &ethernet_main;
Dave Barach1f49ed62016-02-24 11:29:06 -05002222 em->next_by_ethertype_register_called = 1;
2223 }
2224
Ed Warnickecb9cada2015-12-08 15:45:58 -07002225 /* Setup ethernet type -> next index sparse vector mapping. */
2226 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2227 n[0] = next_index;
2228
2229 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2230 is updated. */
2231 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2232 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002233 l3_next->
2234 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002235
2236 // do not allow the cached next index's to be updated if L3
2237 // redirect is enabled, as it will have overwritten them
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002238 if (!em->redirect_l3)
2239 {
2240 // Cache common ethertypes directly
2241 if (ethertype == ETHERNET_TYPE_IP4)
2242 {
2243 l3_next->input_next_ip4 = next_index;
2244 }
2245 else if (ethertype == ETHERNET_TYPE_IP6)
2246 {
2247 l3_next->input_next_ip6 = next_index;
2248 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002249 else if (ethertype == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002250 {
2251 l3_next->input_next_mpls = next_index;
2252 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002253 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002254 return 0;
2255}
2256
Dave Barachf8d50682019-05-14 18:01:44 -04002257void
2258ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002259{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002260 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2261 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002262
2263 ethernet_setup_node (vm, ethernet_input_node.index);
2264 ethernet_setup_node (vm, ethernet_input_type_node.index);
2265 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2266
2267 next_by_ethertype_init (&em->l3_next);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002268
Ed Warnickecb9cada2015-12-08 15:45:58 -07002269 // Initialize pools and vector for vlan parsing
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002270 vec_validate (em->main_intfs, 10); // 10 main interfaces
2271 pool_alloc (em->vlan_pool, 10);
2272 pool_alloc (em->qinq_pool, 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002273
2274 // The first vlan pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002275 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002276 // The first qinq pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002277 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002278}
2279
Ed Warnickecb9cada2015-12-08 15:45:58 -07002280void
2281ethernet_register_input_type (vlib_main_t * vm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002282 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002283{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002284 ethernet_main_t *em = &ethernet_main;
2285 ethernet_type_info_t *ti;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002286 u32 i;
2287
2288 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002289 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002290 if (error)
2291 clib_error_report (error);
2292 }
2293
2294 ti = ethernet_get_type_info (em, type);
Dave Barach4bda2d92019-07-03 15:21:50 -04002295 if (ti == 0)
2296 {
2297 clib_warning ("type_info NULL for type %d", type);
2298 return;
2299 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002300 ti->node_index = node_index;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002301 ti->next_index = vlib_node_add_next (vm,
2302 ethernet_input_node.index, node_index);
2303 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002304 ASSERT (i == ti->next_index);
2305
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002306 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002307 ASSERT (i == ti->next_index);
2308
2309 // Add the L3 node for this ethertype to the next nodes structure
2310 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2311
2312 // Call the registration functions for other nodes that want a mapping
2313 l2bvi_register_input_type (vm, type, node_index);
2314}
2315
2316void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002317ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002318{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002319 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002320 u32 i;
2321
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002322 em->l2_next =
2323 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002324
Damjan Marion607de1a2016-08-16 22:53:54 +02002325 /*
Ed Warnickecb9cada2015-12-08 15:45:58 -07002326 * Even if we never use these arcs, we have to align the next indices...
2327 */
2328 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2329
2330 ASSERT (i == em->l2_next);
2331
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002332 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002333 ASSERT (i == em->l2_next);
2334}
2335
2336// Register a next node for L3 redirect, and enable L3 redirect
2337void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002338ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002339{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002340 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002341 u32 i;
2342
2343 em->redirect_l3 = 1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002344 em->redirect_l3_next = vlib_node_add_next (vm,
2345 ethernet_input_node.index,
2346 node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002347 /*
2348 * Change the cached next nodes to the redirect node
2349 */
2350 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2351 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2352 em->l3_next.input_next_mpls = em->redirect_l3_next;
2353
2354 /*
2355 * Even if we never use these arcs, we have to align the next indices...
2356 */
2357 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2358
2359 ASSERT (i == em->redirect_l3_next);
jerryianff82ed62016-12-05 17:13:00 +08002360
2361 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2362
2363 ASSERT (i == em->redirect_l3_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002364}
Damjan Marion5beecec2018-09-10 13:09:21 +02002365#endif
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002366
2367/*
2368 * fd.io coding-style-patch-verification: ON
2369 *
2370 * Local Variables:
2371 * eval: (c-set-style "gnu")
2372 * End:
2373 */