/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ethernet_node.c: ethernet packet processing
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/p2p_ethernet.h>
#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/classify/pcap_classify.h>

#define foreach_ethernet_input_next		\
  _ (PUNT, "error-punt")			\
  _ (DROP, "error-drop")			\
  _ (LLC, "llc-input")				\
  _ (IP4_INPUT, "ip4-input")			\
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;

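/*
 * For reference, the X-macro above expands the enum roughly to:
 *   ETHERNET_INPUT_NEXT_PUNT, ETHERNET_INPUT_NEXT_DROP,
 *   ETHERNET_INPUT_NEXT_LLC, ETHERNET_INPUT_NEXT_IP4_INPUT,
 *   ETHERNET_INPUT_NEXT_IP4_INPUT_NCS, ETHERNET_INPUT_N_NEXT
 * while the string names ("error-punt", "ip4-input", ...) are the graph
 * node names those next indices resolve to when the node is registered.
 */
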
typedef struct
{
  u8 packet_data[32];
  u16 frame_flags;
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;

static u8 *
format_ethernet_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
  u32 indent = format_get_indent (s);

  if (t->frame_flags)
    {
      s = format (s, "frame: flags 0x%x", t->frame_flags);
      if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
	s = format (s, ", hw-if-index %u, sw-if-index %u",
		    t->frame_data.hw_if_index, t->frame_data.sw_if_index);
      s = format (s, "\n%U", format_white_space, indent);
    }
  s = format (s, "%U", format_ethernet_header, t->packet_data);

  return s;
}

extern vlib_node_registration_t ethernet_input_node;

typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;


// Parse the ethernet header to extract vlan tags and innermost ethertype
static_always_inline void
parse_header (ethernet_input_variant_t variant,
	      vlib_buffer_t * b0,
	      u16 * type,
	      u16 * orig_type,
	      u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = vlib_buffer_get_current (b0);

      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing
      u16 *e0;

      e0 = vlib_buffer_get_current (b0);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = vlib_buffer_get_current (b0);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      if (0 == *outer_id)
	*match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
	{
	  // Double tagged packet
	  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

	  h0 = vlib_buffer_get_current (b0);

	  tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

	  *inner_id = tag & 0xfff;

	  *type = clib_net_to_host_u16 (h0->type);

	  vlib_buffer_advance (b0, sizeof (h0[0]));
	  vlan_count = 2;
	  if (*type == ETHERNET_TYPE_VLAN)
	    {
	      // More than double tagged packet
	      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

	      vlib_buffer_advance (b0, sizeof (h0[0]));
	      vlan_count = 3;	// "unknown" number, aka, 3-or-more
	    }
	}
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}

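/*
 * Illustration of what parse_header() produces (example values only, inferred
 * from the code above):
 *   untagged IPv4 frame:        *type = 0x0800, outer/inner id = 0,
 *                               match_flags = VALID | MATCH_0_TAG
 *   dot1q, VLAN 100, IPv4:      *orig_type = 0x8100, *type = 0x0800,
 *                               *outer_id = 100, match_flags = VALID | MATCH_1_TAG
 *   QinQ, outer 200, inner 300: *outer_id = 200, *inner_id = 300,
 *                               match_flags = VALID | MATCH_2_TAG
 * In all cases the buffer's current_data is left pointing just past the
 * parsed ethernet/VLAN headers.
 */
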
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac);

// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
identify_subint (ethernet_main_t * em,
		 vnet_hw_interface_t * hi,
		 vlib_buffer_t * b0,
		 u32 match_flags,
		 main_intf_t * main_intf,
		 vlan_intf_t * vlan_intf,
		 qinq_intf_t * qinq_intf,
		 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
  u32 matched;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
				 qinq_intf, new_sw_if_index, error0, is_l2);

  if (matched)
    {
      // Perform L3 my-mac filter
      // A unicast packet arriving on an L3 interface must have a dmac
      // matching the interface mac. If interface has STATUS_L3 bit set
      // mac filter is already done.
      if (!(*is_l2 || (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
	{
	  u64 dmacs[2];
	  u8 dmacs_bad[2];
	  ethernet_header_t *e0;
	  ethernet_interface_t *ei0;

	  e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
	  dmacs[0] = *(u64 *) e0;
	  ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);

	  if (ei0 && vec_len (ei0->secondary_addrs))
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      1 /* have_sec_dmac */ );
	  else
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      0 /* have_sec_dmac */ );
	  if (dmacs_bad[0])
	    *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	}

      // Check for down subinterface
      *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
    }
}

static_always_inline void
determine_next_node (ethernet_main_t * em,
		     ethernet_input_variant_t variant,
		     u32 is_l20,
		     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
	      ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;
    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
	i0 == SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // If variant is variant_ethernet then we came from LLC processing. Don't
      // go back there; drop instead by keeping the drop/bad table result.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
	{
	  *next0 = ETHERNET_INPUT_NEXT_LLC;
	}
    }
}


/* following vector code relies on following assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
	       STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
	       "l3_hdr_offset must follow l2_hdr_offset");

static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  u64x4 r, radv;

  /* vector of signed 16 bit integers used in signed vector add operation
     to advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}

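/*
 * Illustration (not used by the code): the vector path above works because
 * the first 8 bytes of vlib_buffer_t are laid out as
 *   i16 current_data; u16 current_length; u32 flags;
 * so one 64-bit load per buffer covers all three fields, and adding the
 * i16x16 constant { adv, -adv, 0, 0, ... } advances current_data while
 * shrinking current_length in the same instruction. The STATIC_ASSERTs
 * above are what guarantee this layout.
 */
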
static_always_inline void
eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;

  if (is_l3)
    vlib_buffer_advance (b[0], adv);
  b[0]->flags |= flags;
  if (!is_l3)
    vnet_buffer (b[0])->l2.l2_len = adv;
}


static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
			      u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}

static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
	     sparse_vec_index (em->l3_next.input_next_by_type, etype));
}

typedef struct
{
  u64 tag, mask;
  u32 sw_if_index;
  u16 type, len, next;
  i16 adv;
  u8 err, n_tags;
  u64 n_packets, n_bytes;
} eth_input_tag_lookup_t;

static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
			      eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}

static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
		      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
		      u64 tag, u16 * next, vlib_buffer_t * b,
		      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
		      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
				     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
	{
	  l->type = clib_net_to_host_u16 (t[3]);
	  l->n_tags = 2;
	  matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
					 qif, &new_sw_if_index, &l->err,
					 &is_l2);
	}
      else
	{
	  l->n_tags = 1;
	  if (vlan1 == 0)
	    {
	      new_sw_if_index = hi->sw_if_index;
	      l->err = ETHERNET_ERROR_NONE;
	      matched = 1;
	      is_l2 = main_is_l3 == 0;
	    }
	  else
	    matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					   SUBINT_CONFIG_MATCH_1_TAG, mif,
					   vif, qif, &new_sw_if_index,
					   &l->err, &is_l2);
	}

      if (l->sw_if_index != new_sw_if_index)
	{
	  eth_input_update_if_counters (vm, vnm, l);
	  l->n_packets = 0;
	  l->n_bytes = 0;
	  l->sw_if_index = new_sw_if_index;
	}
      l->tag = tag;
      l->mask = (l->n_tags == 2) ?
	clib_net_to_host_u64 (0xffffffffffffffff) :
	clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
	l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
	l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
	l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
	  l->n_tags * sizeof (ethernet_vlan_header_t);
      else
	l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
	l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
	l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
	l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
	l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
	l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
	l->next = em->redirect_l3_next;
      else
	{
	  l->next = eth_input_next_by_type (l->type);
	  if (l->next == ETHERNET_INPUT_NEXT_PUNT)
	    l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
	}
    }

  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}

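/*
 * Note on the lookup cache above (explanatory only): eth_input_tag_lookup()
 * keeps its last result in the caller-provided eth_input_tag_lookup_t. The
 * "(tag ^ l->tag) & l->mask" test skips the VLAN/QinQ table walk whenever the
 * current packet carries the same tag(s) as the previous one, which is the
 * common case when a burst arrives on a single sub-interface; only the
 * per-packet counter and error handling after the if-block run every time.
 */
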
#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
is_dmac_bad (u64 dmac, u64 hwaddr)
{
  u64 r0 = dmac & DMAC_MASK;
  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
}

static_always_inline u8
is_sec_dmac_bad (u64 dmac, u64 hwaddr)
{
  return ((dmac & DMAC_MASK) != hwaddr);
}

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  return dmac_bad[0];
}

static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}

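/*
 * Worked example (illustrative values only): each dmacs[] entry holds the
 * first 8 bytes of a frame, so masking with DMAC_MASK leaves just the 6-byte
 * destination MAC. A unicast frame addressed to some MAC other than hwaddr
 * makes is_dmac_bad() return 1, while a multicast/broadcast destination
 * (I/G bit set, e.g. ff:ff:ff:ff:ff:ff) is never flagged bad here.
 * is_sec_dmac_bad() omits the I/G exception, since the &= in the secondary
 * checks can only keep a frame flagged bad if the primary check already
 * flagged it, and the primary check has already passed multicast.
 */
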
/*
 * DMAC check for ethernet_input_inline()
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u8 bad = 0;

  ASSERT (0 == ei->address.zero);

  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      ethernet_interface_address_t *sec_addr;

      vec_foreach (sec_addr, ei->secondary_addrs)
      {
	ASSERT (0 == sec_addr->zero);
	hwaddr = sec_addr->as_u64;

	bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
	       eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
					    dmacs_bad + 1));

	if (!bad)
	  return;
      }
    }
}

static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
				    u64 * dmacs, u8 * dmacs_bad,
				    u32 n_packets, ethernet_interface_t * ei,
				    u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

  ASSERT (0 == ei->address.zero);

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      ethernet_interface_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
      {
	u64 hwaddr = addr->as_u64;
	i32 n_left = n_packets;
	u64 *dmac = dmacs;
	u8 *dmac_bad = dmacs_bad;

	ASSERT (0 == addr->zero);

	bad = 0;

	while (n_left > 0)
	  {
	    int adv = 0;
	    int n_bad;

	    /* skip any that have already matched */
	    if (!dmac_bad[0])
	      {
		dmac += 1;
		dmac_bad += 1;
		n_left -= 1;
		continue;
	      }

	    n_bad = clib_min (4, n_left);

	    /* If >= 4 left, compare 4 together */
	    if (n_bad == 4)
	      {
		bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
		adv = 4;
		n_bad = 0;
	      }

	    /* handle individually */
	    while (n_bad > 0)
	      {
		bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
						    dmac_bad + adv);
		adv += 1;
		n_bad -= 1;
	      }

	    dmac += adv;
	    dmac_bad += adv;
	    n_left -= adv;
	  }

	if (!bad)		/* can stop looping if everything matched */
	  break;
      }
    }
}

/* process frame of buffers, store ethertype into array and update
   buffer metadata fields depending on interface being l2 or l3 assuming that
   packets are untagged. For tagged packets those fields are updated later.
   Optionally store Destination MAC address and tag data into arrays
   for further processing */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
	       "VLIB_FRAME_SIZE must be power of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_hw_interface_t * hi,
			 u32 * buffer_indices, u32 n_packets, int main_is_l3,
			 int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 1 /* have_sec_dmac */ );
      else
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered as slowpath, in l2 mode all untagged packets are
     considered as fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
	{
	  u16x16 r = zero;
	  u16x16 e16 = u16x16_load_unaligned (etype);
	  if (main_is_l3)
	    {
	      r += (e16 == et16_ip4) & next16_ip4;
	      r += (e16 == et16_ip6) & next16_ip6;
	      r += (e16 == et16_mpls) & next16_mpls;
	    }
	  else
	    r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
	  u16x16_store_unaligned (r, next);

	  if (!u16x16_is_all_zero (r == zero))
	    {
	      if (u16x16_is_all_zero (r))
		{
		  u16x16_store_unaligned (u16x16_splat (i) + stairs,
					  slowpath_indices + n_slowpath);
		  n_slowpath += 16;
		}
	      else
		{
		  for (int j = 0; j < 16; j++)
		    if (next[j] == 0)
		      slowpath_indices[n_slowpath++] = i + j;
		}
	    }

	  etype += 16;
	  next += 16;
	  n_left -= 16;
	  i += 16;
	  continue;
	}
#endif
      if (main_is_l3 && etype[0] == et_ip4)
	next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
	next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
	next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
	       etype[0] != et_vlan && etype[0] != et_dot1ad)
	next[0] = next_l2;
      else
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
	.mask = -1LL,
	.tag = tags[si[0]] ^ -1LL,
	.sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
	{
	  i = si[0];
	  u16 etype = etypes[i];

	  if (etype == et_vlan)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1q_lookup, dmacs_bad[i], 0,
				    main_is_l3, dmac_check);
	    }
	  else if (etype == et_dot1ad)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1ad_lookup, dmacs_bad[i], 1,
				    main_is_l3, dmac_check);
	    }
	  else
	    {
	      /* untagged packet with not well known ethertype */
	      if (last_unknown_etype != etype)
		{
		  last_unknown_etype = etype;
		  etype = clib_host_to_net_u16 (etype);
		  last_unknown_next = eth_input_next_by_type (etype);
		}
	      if (dmac_check && main_is_l3 && dmacs_bad[i])
		{
		  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
		  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
		  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
		}
	      else
		nexts[i] = last_unknown_next;
	    }

	  /* next */
	  n_left--;
	  si++;
	}

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}

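/*
 * Classification sketch for the fastpath above (illustrative, AVX2 branch):
 * sixteen ethertypes are compared at once, e.g. in l3 mode
 *   etypes: 0800 0800 86dd 8100 ...   (network byte order)
 *   r:      ip4  ip4  ip6  0    ...
 * Any lane left at 0 did not match a well-known type, so its index is
 * appended to slowpath_indices and resolved per packet (VLAN tag lookup,
 * sparse ethertype table, DMAC error) in the n_slowpath block.
 */
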
static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
		      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;

  if (main_is_l3)
    {
      if (int_is_l3 ||		/* DMAC filter already done by NIC */
	  ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 1);
	}
      return;
    }
  else
    {
      if (hi->l3_if_count == 0)
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 1);
	}
      return;
    }
}

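/*
 * Decision summary for eth_input_single_int() above (no new logic, just a
 * restatement of the branches):
 *   untagged subint is L3: skip the software DMAC check when the NIC already
 *     filters (STATUS_L3) or when the interface only carries L2
 *     sub-interfaces; otherwise check.
 *   untagged subint is L2: skip the software DMAC check when there are no L3
 *     sub-interfaces on the interface; otherwise check.
 */
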
static_always_inline void
ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * from_frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 *from, n_left;
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;

      while (n_left)
	{
	  ethernet_input_trace_t *t0;
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);

	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b0,
				   sizeof (ethernet_input_trace_t));
	      clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
				sizeof (t0->packet_data));
	      t0->frame_flags = from_frame->flags;
	      clib_memcpy_fast (&t0->frame_data,
				vlib_frame_scalar_args (from_frame),
				sizeof (ethernet_input_frame_t));
	    }
	  from += 1;
	  n_left -= 1;
	}
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (vnm->pcap.pcap_rx_enable))
    {
      u32 bi0;
      vnet_pcap_t *pp = &vnm->pcap;

      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;
      while (n_left > 0)
	{
	  vlib_buffer_t *b0;
	  bi0 = from[0];
	  from++;
	  n_left--;
	  b0 = vlib_get_buffer (vm, bi0);
	  if (vnet_is_packet_pcaped (pp, b0, ~0))
	    pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
	}
    }
}

static_always_inline void
ethernet_input_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * node,
		       u32 * from, u32 n_packets,
		       ethernet_input_variant_t variant)
{
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  vlib_node_runtime_t *error_node;
  u32 n_left_from, next_index, *to_next;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 thread_index = vm->thread_index;
  u32 cached_sw_if_index = ~0;
  u32 cached_is_l2 = 0;		/* shut up gcc */
  vnet_hw_interface_t *hi = NULL;	/* used for main interface only */
  ethernet_interface_t *ei = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
    error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
  else
    error_node = node;

  n_left_from = n_packets;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u8 next0, next1, error0, error1;
	  u16 type0, orig_type0, type1, orig_type1;
	  u16 outer_id0, inner_id0, outer_id1, inner_id1;
	  u32 match_flags0, match_flags1;
	  u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
	    new_sw_if_index1, len1;
	  vnet_hw_interface_t *hi0, *hi1;
	  main_intf_t *main_intf0, *main_intf1;
	  vlan_intf_t *vlan_intf0, *vlan_intf1;
	  qinq_intf_t *qinq_intf0, *qinq_intf1;
	  u32 is_l20, is_l21;
	  ethernet_header_t *e0, *e1;
	  u64 dmacs[2];
	  u8 dmacs_bad[2];

	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], STORE);
	    vlib_prefetch_buffer_header (b[3], STORE);

	    CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
	    CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = b[0];
	  b1 = b[1];
	  b += 2;

	  error0 = error1 = ETHERNET_ERROR_NONE;
	  e0 = vlib_buffer_get_current (b0);
	  type0 = clib_net_to_host_u16 (e0->type);
	  e1 = vlib_buffer_get_current (b1);
	  type1 = clib_net_to_host_u16 (e1->type);

	  /* Set the L2 header offset for all packets */
	  vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
	  vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
	  b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
	  b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

	  /* Speed-path for the untagged case */
	  if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			    && !ethernet_frame_is_any_tagged_x2 (type0,
								 type1)))
	    {
	      main_intf_t *intf0;
	      subint_config_t *subint0;
	      u32 sw_if_index0, sw_if_index1;

	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
	      is_l20 = cached_is_l2;

	      /* This is probably wholly unnecessary */
	      if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
		goto slowpath;

	      /* Now sw_if_index0 == sw_if_index1 */
	      if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
		{
		  cached_sw_if_index = sw_if_index0;
		  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
		  ei = ethernet_get_interface (em, hi->hw_if_index);
		  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
		  subint0 = &intf0->untagged_subint;
		  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
		}

	      if (PREDICT_TRUE (is_l20 != 0))
		{
		  vnet_buffer (b0)->l3_hdr_offset =
		    vnet_buffer (b0)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  vnet_buffer (b1)->l3_hdr_offset =
		    vnet_buffer (b1)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  next0 = em->l2_next;
		  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
		  next1 = em->l2_next;
		  vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
		}
	      else
		{
		  if (ei && (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3))
		    goto skip_dmac_check01;

		  dmacs[0] = *(u64 *) e0;
		  dmacs[1] = *(u64 *) e1;

		  if (ei && vec_len (ei->secondary_addrs))
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      1 /* have_sec_dmac */ );
		  else
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      0 /* have_sec_dmac */ );

		  if (dmacs_bad[0])
		    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
		  if (dmacs_bad[1])
		    error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;

		skip_dmac_check01:
		  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type0, b0,
				       &error0, &next0);
		  vlib_buffer_advance (b1, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type1, b1,
				       &error1, &next1);
		}
	      goto ship_it01;
	    }

	  /* Slow-path for the tagged case */
	slowpath:
	  parse_header (variant,
			b0,
			&type0,
			&orig_type0, &outer_id0, &inner_id0, &match_flags0);

	  parse_header (variant,
			b1,
			&type1,
			&orig_type1, &outer_id1, &inner_id1, &match_flags1);

	  old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	  old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index0,
				  orig_type0,
				  outer_id0,
				  inner_id0,
				  &hi0,
				  &main_intf0, &vlan_intf0, &qinq_intf0);

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index1,
				  orig_type1,
				  outer_id1,
				  inner_id1,
				  &hi1,
				  &main_intf1, &vlan_intf1, &qinq_intf1);

	  identify_subint (em,
			   hi0,
			   b0,
			   match_flags0,
			   main_intf0,
			   vlan_intf0,
			   qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

	  identify_subint (em,
			   hi1,
			   b1,
			   match_flags1,
			   main_intf1,
			   vlan_intf1,
			   qinq_intf1, &new_sw_if_index1, &error1, &is_l21);

	  // Save RX sw_if_index for later nodes
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    error0 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] =
	    error1 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;

	  // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
	  if (((new_sw_if_index0 != ~0)
	       && (new_sw_if_index0 != old_sw_if_index0))
	      || ((new_sw_if_index1 != ~0)
		  && (new_sw_if_index1 != old_sw_if_index1)))
	    {

	      len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
		- vnet_buffer (b0)->l2_hdr_offset;
	      len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
		- vnet_buffer (b1)->l2_hdr_offset;

	      stats_n_packets += 2;
	      stats_n_bytes += len0 + len1;

	      if (PREDICT_FALSE
		  (!(new_sw_if_index0 == stats_sw_if_index
		     && new_sw_if_index1 == stats_sw_if_index)))
		{
		  stats_n_packets -= 2;
		  stats_n_bytes -= len0 + len1;

		  if (new_sw_if_index0 != old_sw_if_index0
		      && new_sw_if_index0 != ~0)
		    vlib_increment_combined_counter
		      (vnm->interface_main.combined_sw_if_counters
		       + VNET_INTERFACE_COUNTER_RX,
		       thread_index, new_sw_if_index0, 1, len0);
		  if (new_sw_if_index1 != old_sw_if_index1
		      && new_sw_if_index1 != ~0)
		    vlib_increment_combined_counter
		      (vnm->interface_main.combined_sw_if_counters
		       + VNET_INTERFACE_COUNTER_RX,
		       thread_index, new_sw_if_index1, 1, len1);

		  if (new_sw_if_index0 == new_sw_if_index1)
		    {
		      if (stats_n_packets > 0)
			{
			  vlib_increment_combined_counter
			    (vnm->interface_main.combined_sw_if_counters
			     + VNET_INTERFACE_COUNTER_RX,
			     thread_index,
			     stats_sw_if_index,
			     stats_n_packets, stats_n_bytes);
			  stats_n_packets = stats_n_bytes = 0;
			}
		      stats_sw_if_index = new_sw_if_index0;
		    }
		}
	    }
1467
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001468 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1469 is_l20 = is_l21 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001470
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001471 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1472 &next0);
1473 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1474 &next1);
1475
John Lo1904c472017-03-10 17:15:22 -05001476 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001477 b0->error = error_node->errors[error0];
1478 b1->error = error_node->errors[error1];
1479
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001480 // verify speculative enqueue
1481 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1482 n_left_to_next, bi0, bi1, next0,
1483 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001484 }
1485
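      /*
       * Editorial note: the loop below is the single-packet cleanup path.
       * It handles whatever the dual-packet loop above left over (odd
       * packet counts or a nearly full next frame) using the same
       * untagged fast path / tagged slow path split.
       */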
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u8 error0, next0;
	  u16 type0, orig_type0;
	  u16 outer_id0, inner_id0;
	  u32 match_flags0;
	  u32 old_sw_if_index0, new_sw_if_index0, len0;
	  vnet_hw_interface_t *hi0;
	  main_intf_t *main_intf0;
	  vlan_intf_t *vlan_intf0;
	  qinq_intf_t *qinq_intf0;
	  ethernet_header_t *e0;
	  u32 is_l20;
	  u64 dmacs[2];
	  u8 dmacs_bad[2];

	  // Prefetch next iteration
	  if (n_left_from > 1)
	    {
	      vlib_prefetch_buffer_header (b[1], STORE);
	      CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = b[0];
	  b += 1;

	  error0 = ETHERNET_ERROR_NONE;
	  e0 = vlib_buffer_get_current (b0);
	  type0 = clib_net_to_host_u16 (e0->type);

	  /* Set the L2 header offset for all packets */
	  vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
	  b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

	  /* Speed-path for the untagged case */
	  if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			    && !ethernet_frame_is_tagged (type0)))
	    {
	      main_intf_t *intf0;
	      subint_config_t *subint0;
	      u32 sw_if_index0;

	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      is_l20 = cached_is_l2;

	      if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
		{
		  cached_sw_if_index = sw_if_index0;
		  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
		  ei = ethernet_get_interface (em, hi->hw_if_index);
		  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
		  subint0 = &intf0->untagged_subint;
		  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
		}

	      if (PREDICT_TRUE (is_l20 != 0))
		{
		  vnet_buffer (b0)->l3_hdr_offset =
		    vnet_buffer (b0)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  next0 = em->l2_next;
		  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
		}
	      else
		{
		  if (ei && ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
		    goto skip_dmac_check0;

		  dmacs[0] = *(u64 *) e0;

		  if (ei && vec_len (ei->secondary_addrs))
		    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
						      1 /* n_packets */ , ei,
						      1 /* have_sec_dmac */ );
		  else
		    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
						      1 /* n_packets */ , ei,
						      0 /* have_sec_dmac */ );

		  if (dmacs_bad[0])
		    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;

		skip_dmac_check0:
		  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type0, b0,
				       &error0, &next0);
		}
	      goto ship_it0;
	    }

	  /* Slow-path for the tagged case */
	  parse_header (variant, b0, &type0, &orig_type0, &outer_id0,
			&inner_id0, &match_flags0);

	  old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

	  eth_vlan_table_lookups (em, vnm, old_sw_if_index0, orig_type0,
				  outer_id0, inner_id0, &hi0,
				  &main_intf0, &vlan_intf0, &qinq_intf0);

	  identify_subint (em, hi0, b0, match_flags0, main_intf0, vlan_intf0,
			   qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

	  // Save RX sw_if_index for later nodes
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    error0 != ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;

	  // Increment subinterface stats
	  // Note that interface-level counters have already been incremented
	  // prior to calling this function. Thus only subinterface counters
	  // are incremented here.
	  //
	  // Interface level counters include packets received on the main
	  // interface and all subinterfaces. Subinterface level counters
	  // include only those packets received on that subinterface
	  // Increment stats if the subint is valid and it is not the main intf
	  if ((new_sw_if_index0 != ~0)
	      && (new_sw_if_index0 != old_sw_if_index0))
	    {
	      len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
		- vnet_buffer (b0)->l2_hdr_offset;

	      stats_n_packets += 1;
	      stats_n_bytes += len0;

	      // Batch stat increments from the same subinterface so counters
	      // don't need to be incremented for every packet.
	      if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
		{
		  stats_n_packets -= 1;
		  stats_n_bytes -= len0;

		  if (new_sw_if_index0 != ~0)
		    vlib_increment_combined_counter
		      (vnm->interface_main.combined_sw_if_counters
		       + VNET_INTERFACE_COUNTER_RX,
		       thread_index, new_sw_if_index0, 1, len0);
		  if (stats_n_packets > 0)
		    {
		      vlib_increment_combined_counter
			(vnm->interface_main.combined_sw_if_counters
			 + VNET_INTERFACE_COUNTER_RX,
			 thread_index,
			 stats_sw_if_index, stats_n_packets, stats_n_bytes);
		      stats_n_packets = stats_n_bytes = 0;
		    }
		  stats_sw_if_index = new_sw_if_index0;
		}
	    }

	  if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	    is_l20 = 0;

	  determine_next_node (em, variant, is_l20, type0, b0, &error0,
			       &next0);

	ship_it0:
	  b0->error = error_node->errors[error0];

	  // verify speculative enqueue
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  // Increment any remaining batched stats
  if (stats_n_packets > 0)
    {
      vlib_increment_combined_counter
	(vnm->interface_main.combined_sw_if_counters
	 + VNET_INTERFACE_COUNTER_RX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
}
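/*
 * Editorial note: ethernet_input_inline () is shared by the three node
 * functions below.  From the code above, the variant argument changes two
 * things: only ETHERNET_INPUT_VARIANT_ETHERNET takes the untagged
 * speed path, and ETHERNET_INPUT_VARIANT_NOT_L2 forces is_l2 to 0 so
 * packets are treated as L3 regardless of the subinterface config.  The
 * _TYPE variant is handled inside parse_header (not shown here);
 * presumably the frame is assumed to start at the ethertype rather than
 * at the full ethernet header.
 */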

VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;

  ethernet_input_trace (vm, node, frame);

  if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
    {
      ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
      int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
      vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
      eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
    }
  else
    ethernet_input_inline (vm, node, from, n_packets,
			   ETHERNET_INPUT_VARIANT_ETHERNET);
  return n_packets;
}
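/*
 * Editorial note: the ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX branch above lets
 * a driver promise that every buffer in a frame came from the same
 * hw_if_index, enabling the optimized eth_input_single_int () path.  A
 * hedged sketch of what a device input node might do when handing a frame
 * to ethernet-input (identifiers such as rxq are illustrative, not taken
 * from any particular driver):
 *
 *   ethernet_input_frame_t *ef;
 *   vlib_frame_t *f = vlib_get_frame_to_node (vm, ethernet_input_node.index);
 *   f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
 *   ef = vlib_frame_scalar_args (f);
 *   ef->sw_if_index = rxq->sw_if_index;
 *   ef->hw_if_index = rxq->hw_if_index;
 *   // ... copy buffer indices into the frame vector, then ...
 *   vlib_put_frame_to_node (vm, ethernet_input_node.index, f);
 */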

VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_packets = from_frame->n_vectors;
  ethernet_input_trace (vm, node, from_frame);
  ethernet_input_inline (vm, node, from, n_packets,
			 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
  return n_packets;
}

VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_packets = from_frame->n_vectors;
  ethernet_input_trace (vm, node, from_frame);
  ethernet_input_inline (vm, node, from, n_packets,
			 ETHERNET_INPUT_VARIANT_NOT_L2);
  return n_packets;
}


// Return the subinterface config struct for the given sw_if_index.
// Also return via parameter the appropriate match flags for the
// configured number of tags.
// On error (unsupported or not ethernet) return 0.
static subint_config_t *
ethernet_sw_interface_get_config (vnet_main_t * vnm,
				  u32 sw_if_index,
				  u32 * flags, u32 * unsupported)
{
  ethernet_main_t *em = &ethernet_main;
  vnet_hw_interface_t *hi;
  vnet_sw_interface_t *si;
  main_intf_t *main_intf;
  vlan_table_t *vlan_table;
  qinq_table_t *qinq_table;
  subint_config_t *subint = 0;

  hi = vnet_get_sup_hw_interface (vnm, sw_if_index);

  if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
    {
      *unsupported = 0;
      goto done;		// non-ethernet interface
    }

  // ensure there's an entry for the main intf (shouldn't really be necessary)
  vec_validate (em->main_intfs, hi->hw_if_index);
  main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);

  // Locate the subint for the given ethernet config
  si = vnet_get_sw_interface (vnm, sw_if_index);

  if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
    {
      p2p_ethernet_main_t *p2pm = &p2p_main;
      u32 p2pe_sw_if_index =
	p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
      if (p2pe_sw_if_index == ~0)
	{
	  pool_get (p2pm->p2p_subif_pool, subint);
	  si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
	}
      else
	subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
    {
      pipe_t *pipe;

      pipe = pipe_get (sw_if_index);
      subint = &pipe->subint;
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->sub.eth.flags.default_sub)
    {
      subint = &main_intf->default_subint;
      *flags = SUBINT_CONFIG_MATCH_1_TAG |
	SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
    }
  else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
    {
      // if no flags are set then this is a main interface,
      // so treat it as untagged
      subint = &main_intf->untagged_subint;
      *flags = SUBINT_CONFIG_MATCH_0_TAG;
    }
  else
    {
      // one or two tags
      // first get the vlan table
      if (si->sub.eth.flags.dot1ad)
	{
	  if (main_intf->dot1ad_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
	    }
	}
      else
	{			// dot1q
	  if (main_intf->dot1q_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
	    }
	}

      if (si->sub.eth.flags.one_tag)
	{
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_1_TAG :
	    (SUBINT_CONFIG_MATCH_1_TAG |
	     SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }
	  else
	    {
	      // a single vlan, a common case
	      subint =
		&vlan_table->vlans[si->sub.eth.outer_vlan_id].single_tag_subint;
	    }
	}
      else
	{
	  // Two tags
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_2_TAG :
	    (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any
	      && si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }

	  if (si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // a specific outer and "any" inner;
	      // don't need a qinq table for this
	      subint =
		&vlan_table->vlans[si->sub.eth.outer_vlan_id].inner_any_subint;
	      if (si->sub.eth.flags.exact_match)
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG;
		}
	      else
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG |
		    SUBINT_CONFIG_MATCH_3_TAG;
		}
	    }
	  else
	    {
	      // a specific outer + specific inner vlan id, a common case

	      // get the qinq table
	      if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
		{
		  // Allocate a qinq table from the pool
		  pool_get (em->qinq_pool, qinq_table);
		  vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
		    qinq_table - em->qinq_pool;
		}
	      else
		{
		  // Get ptr to existing qinq table
		  qinq_table =
		    vec_elt_at_index (em->qinq_pool,
				      vlan_table->vlans[si->sub.eth.
							outer_vlan_id].qinqs);
		}
	      subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
	    }
	}
    }

done:
  return subint;
}
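/*
 * Editorial summary of the match-flag outcomes above (derived from the
 * code, provided for the reader's convenience):
 *
 *   untagged / no sub flags   -> SUBINT_CONFIG_MATCH_0_TAG
 *   default-sub               -> MATCH_1_TAG | MATCH_2_TAG | MATCH_3_TAG
 *   one tag, exact-match      -> MATCH_1_TAG
 *   one tag, non-exact        -> MATCH_1_TAG | MATCH_2_TAG | MATCH_3_TAG
 *   two tags, exact-match     -> MATCH_2_TAG
 *   two tags, non-exact       -> MATCH_2_TAG | MATCH_3_TAG
 *   p2p / pipe                -> SUBINT_CONFIG_P2P
 *
 * "any" outer VLAN (and "any" outer + "any" inner) matching is still
 * unsupported and is reported via *unsupported = 1.
 */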

static clib_error_t *
ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
{
  subint_config_t *subint;
  u32 placeholder_flags;
  u32 placeholder_unsup;
  clib_error_t *error = 0;

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
				      &placeholder_unsup);

  if (subint == 0)
    {
      // not implemented yet or not ethernet
      goto done;
    }

  subint->sw_if_index =
    ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);

done:
  return error;
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);


#ifndef CLIB_MARCH_VARIANT
// Set the L2/L3 mode for the subinterface
void
ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
{
  subint_config_t *subint;
  u32 placeholder_flags;
  u32 placeholder_unsup;
  int is_port;
  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);

  is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
				      &placeholder_unsup);

  if (subint == 0)
    {
      // unimplemented or not ethernet
      goto done;
    }

  // Double check that the config we found is for our interface (or the interface is down)
  ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));

  if (l2)
    {
      subint->flags |= SUBINT_CONFIG_L2;
      if (is_port)
	subint->flags |=
	  SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
	  | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
    }
  else
    {
      subint->flags &= ~SUBINT_CONFIG_L2;
      if (is_port)
	subint->flags &=
	  ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
	    | SUBINT_CONFIG_MATCH_3_TAG);
    }

done:
  return;
}
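/*
 * Editorial note: this is the hook expected to be called when a
 * (sub)interface is placed into or taken out of L2 forwarding (bridge
 * domain or xconnect), roughly along the lines of
 *
 *   ethernet_sw_interface_set_l2_mode (vnm, sw_if_index, 1);  // to L2
 *   ethernet_sw_interface_set_l2_mode (vnm, sw_if_index, 0);  // back to L3
 *
 * The actual call sites live in the l2 code and are not shown here, so
 * treat the lines above as an illustrative usage sketch rather than a
 * quote of those call sites.
 */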

/*
 * Set the L2/L3 mode for the subinterface regardless of port
 */
void
ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
					  u32 sw_if_index, u32 l2)
{
  subint_config_t *subint;
  u32 placeholder_flags;
  u32 placeholder_unsup;

  /* Find the config for this subinterface */
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
				      &placeholder_unsup);

  if (subint == 0)
    {
      /* unimplemented or not ethernet */
      goto done;
    }

  /*
   * Double check that the config we found is for our interface (or the
   * interface is down)
   */
  ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));

  if (l2)
    {
      subint->flags |= SUBINT_CONFIG_L2;
    }
  else
    {
      subint->flags &= ~SUBINT_CONFIG_L2;
    }

done:
  return;
}
#endif

static clib_error_t *
ethernet_sw_interface_add_del (vnet_main_t * vnm,
			       u32 sw_if_index, u32 is_create)
{
  clib_error_t *error = 0;
  subint_config_t *subint;
  u32 match_flags;
  u32 unsupported = 0;

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
				      &unsupported);

  if (subint == 0)
    {
      // not implemented yet or not ethernet
      if (unsupported)
	{
	  // this is the NYI case
	  error = clib_error_return (0, "not implemented yet");
	}
      goto done;
    }

  if (!is_create)
    {
      subint->flags = 0;
      return error;
    }

  // Initialize the subint
  if (subint->flags & SUBINT_CONFIG_VALID)
    {
      // Error vlan already in use
      error = clib_error_return (0, "vlan is already in use");
    }
  else
    {
      // Note that config is L3 by default
      subint->flags = SUBINT_CONFIG_VALID | match_flags;
      subint->sw_if_index = ~0;	// because interfaces are initially down
    }

done:
  return error;
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
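/*
 * Editorial note: the add/del hook above is what makes a freshly created
 * subinterface visible to the VLAN tables; it marks the subint config
 * SUBINT_CONFIG_VALID (L3 by default) with the computed match flags and
 * leaves sw_if_index at ~0 until the admin-up hook fills it in.
 */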

static char *ethernet_error_strings[] = {
#define ethernet_error(n,c,s) s,
#include "error.def"
#undef ethernet_error
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ethernet_input_node) = {
  .name = "ethernet-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .scalar_size = sizeof (ethernet_input_frame_t),
  .n_errors = ETHERNET_N_ERROR,
  .error_strings = ethernet_error_strings,
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ethernet_input_trace,
  .unformat_buffer = unformat_ethernet_header,
};

VLIB_REGISTER_NODE (ethernet_input_type_node) = {
  .name = "ethernet-input-type",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};

VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
  .name = "ethernet-input-not-l2",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};
/* *INDENT-ON* */
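/*
 * Editorial note: the three node registrations above deliberately share
 * the same next-node list.  The registration helpers further down add any
 * new arc to all three nodes and assert that the resulting next indices
 * match, so a cached next index is valid no matter which ethernet-input
 * variant dispatched the packet.
 */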

#ifndef CLIB_MARCH_VARIANT
void
ethernet_set_rx_redirect (vnet_main_t * vnm,
			  vnet_hw_interface_t * hi, u32 enable)
{
  // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
  // don't go directly to ip4-input)
  vnet_hw_interface_rx_redirect_to_node
    (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
}


/*
 * Initialization and registration for the next_by_ethernet structure
 */

clib_error_t *
next_by_ethertype_init (next_by_ethertype_t * l3_next)
{
  l3_next->input_next_by_type = sparse_vec_new
    ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
     /* bits in index */ BITS (((ethernet_header_t *) 0)->type));

  vec_validate (l3_next->sparse_index_by_input_next_index,
		ETHERNET_INPUT_NEXT_DROP);
  vec_validate (l3_next->sparse_index_by_input_next_index,
		ETHERNET_INPUT_NEXT_PUNT);
  l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
    SPARSE_VEC_INVALID_INDEX;
  l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
    SPARSE_VEC_INVALID_INDEX;

  /*
   * Make sure we don't wipe out an ethernet registration by mistake.
   * Can happen if init function ordering constraints are missing.
   */
  if (CLIB_DEBUG > 0)
    {
      ethernet_main_t *em = &ethernet_main;
      ASSERT (em->next_by_ethertype_register_called == 0);
    }

  return 0;
}
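/*
 * Editorial note: input_next_by_type is a sparse vector indexed by the
 * 16-bit ethertype.  A minimal lookup sketch, assuming the sparse_vec API
 * as used elsewhere in this file (treat it as illustrative):
 *
 *   u32 si = sparse_vec_index (l3_next->input_next_by_type,
 *                              clib_net_to_host_u16 (e->type));
 *   u16 next = vec_elt (l3_next->input_next_by_type, si);
 *
 * Ethertypes that were never registered resolve to the sparse vector's
 * reserved "miss" entry, and the inverse mapping above marks the DROP and
 * PUNT arcs as having no sparse index of their own.
 */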

// Add an ethertype -> next index mapping to the structure
clib_error_t *
next_by_ethertype_register (next_by_ethertype_t * l3_next,
			    u32 ethertype, u32 next_index)
{
  u32 i;
  u16 *n;
  ethernet_main_t *em = &ethernet_main;

  if (CLIB_DEBUG > 0)
    {
      ethernet_main_t *em = &ethernet_main;
      em->next_by_ethertype_register_called = 1;
    }

  /* Setup ethernet type -> next index sparse vector mapping. */
  n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
  n[0] = next_index;

  /* Rebuild next index -> sparse index inverse mapping when sparse vector
     is updated. */
  vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
  for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
    l3_next->
      sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;

  // do not allow the cached next indices to be updated if L3
  // redirect is enabled, as it will have overwritten them
  if (!em->redirect_l3)
    {
      // Cache common ethertypes directly
      if (ethertype == ETHERNET_TYPE_IP4)
	{
	  l3_next->input_next_ip4 = next_index;
	}
      else if (ethertype == ETHERNET_TYPE_IP6)
	{
	  l3_next->input_next_ip6 = next_index;
	}
      else if (ethertype == ETHERNET_TYPE_MPLS)
	{
	  l3_next->input_next_mpls = next_index;
	}
    }
  return 0;
}

void
ethernet_setup_node (vlib_main_t *vm, u32 node_index)
{
  vlib_node_t *n = vlib_get_node (vm, node_index);
  pg_node_t *pn = pg_get_node (node_index);

  n->format_buffer = format_ethernet_header_with_length;
  n->unformat_buffer = unformat_ethernet_header;
  pn->unformat_edit = unformat_pg_ethernet_header;
}

void
ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
{
  __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
  __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;

  ethernet_setup_node (vm, ethernet_input_node.index);
  ethernet_setup_node (vm, ethernet_input_type_node.index);
  ethernet_setup_node (vm, ethernet_input_not_l2_node.index);

  next_by_ethertype_init (&em->l3_next);

  // Initialize pools and vector for vlan parsing
  vec_validate (em->main_intfs, 10);	// 10 main interfaces
  pool_alloc (em->vlan_pool, 10);
  pool_alloc (em->qinq_pool, 1);

  // The first vlan pool will always be reserved for an invalid table
  pool_get (em->vlan_pool, invalid_vlan_table);	// first id = 0
  // The first qinq pool will always be reserved for an invalid table
  pool_get (em->qinq_pool, invalid_qinq_table);	// first id = 0
}
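/*
 * Editorial note: pool index 0 is deliberately burned in both pools above
 * so that a dot1ad_vlans/dot1q_vlans or qinqs value of 0 can safely mean
 * "no table allocated yet" in ethernet_sw_interface_get_config ().
 */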

void
ethernet_register_input_type (vlib_main_t * vm,
			      ethernet_type_t type, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_type_info_t *ti;
  u32 i;

  {
    clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
    if (error)
      clib_error_report (error);
  }

  ti = ethernet_get_type_info (em, type);
  if (ti == 0)
    {
      clib_warning ("type_info NULL for type %d", type);
      return;
    }
  ti->node_index = node_index;
  ti->next_index = vlib_node_add_next (vm,
				       ethernet_input_node.index, node_index);
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
  ASSERT (i == ti->next_index);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
  ASSERT (i == ti->next_index);

  // Add the L3 node for this ethertype to the next nodes structure
  next_by_ethertype_register (&em->l3_next, type, ti->next_index);

  // Call the registration functions for other nodes that want a mapping
  l2bvi_register_input_type (vm, type, node_index);
}
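/*
 * Editorial usage sketch (hedged): an L3 protocol typically registers its
 * input node for its ethertype at init time, e.g. ARP does roughly
 *
 *   ethernet_register_input_type (vm, ETHERNET_TYPE_ARP,
 *                                 arp_input_node.index);
 *
 * The node name shown is the conventional one; consult the protocol's own
 * init code for the authoritative call.
 */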

void
ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  u32 i;

  em->l2_next =
    vlib_node_add_next (vm, ethernet_input_node.index, node_index);

  /*
   * Even if we never use these arcs, we have to align the next indices...
   */
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);

  ASSERT (i == em->l2_next);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
  ASSERT (i == em->l2_next);
}

// Register a next node for L3 redirect, and enable L3 redirect
void
ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  u32 i;

  em->redirect_l3 = 1;
  em->redirect_l3_next = vlib_node_add_next (vm,
					     ethernet_input_node.index,
					     node_index);
  /*
   * Change the cached next nodes to the redirect node
   */
  em->l3_next.input_next_ip4 = em->redirect_l3_next;
  em->l3_next.input_next_ip6 = em->redirect_l3_next;
  em->l3_next.input_next_mpls = em->redirect_l3_next;

  /*
   * Even if we never use these arcs, we have to align the next indices...
   */
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);

  ASSERT (i == em->redirect_l3_next);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);

  ASSERT (i == em->redirect_l3_next);
}
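/*
 * Editorial note: ethernet_register_l3_redirect () is the counterpart of
 * the cached-ethertype optimization above.  Once a feature claims all L3
 * traffic, the cached ip4/ip6/mpls next indices all point at the redirect
 * node, and next_by_ethertype_register () stops refreshing them (see the
 * redirect_l3 check earlier in this file).
 */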
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */