/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ethernet_node.c: ethernet packet processing
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/p2p_ethernet.h>
#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/classify/trace_classify.h>

#define foreach_ethernet_input_next            \
  _ (PUNT, "error-punt")                       \
  _ (DROP, "error-drop")                       \
  _ (LLC, "llc-input")                         \
  _ (IP4_INPUT, "ip4-input")                   \
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;

typedef struct
{
  u8 packet_data[32];
  u16 frame_flags;
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;

static u8 *
format_ethernet_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
  u32 indent = format_get_indent (s);

  if (t->frame_flags)
    {
      s = format (s, "frame: flags 0x%x", t->frame_flags);
      if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
        s = format (s, ", hw-if-index %u, sw-if-index %u",
                    t->frame_data.hw_if_index, t->frame_data.sw_if_index);
      s = format (s, "\n%U", format_white_space, indent);
    }
  s = format (s, "%U", format_ethernet_header, t->packet_data);

  return s;
}

extern vlib_node_registration_t ethernet_input_node;

typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;

// Parse the ethernet header to extract vlan tags and innermost ethertype
static_always_inline void
parse_header (ethernet_input_variant_t variant,
              vlib_buffer_t * b0,
              u16 * type,
              u16 * orig_type,
              u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = vlib_buffer_get_current (b0);

      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing
      u16 *e0;

      e0 = vlib_buffer_get_current (b0);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = vlib_buffer_get_current (b0);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      if (0 == *outer_id)
        *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
        {
          // Double tagged packet
          *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

          h0 = vlib_buffer_get_current (b0);

          tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

          *inner_id = tag & 0xfff;

          *type = clib_net_to_host_u16 (h0->type);

          vlib_buffer_advance (b0, sizeof (h0[0]));
          vlan_count = 2;
          if (*type == ETHERNET_TYPE_VLAN)
            {
              // More than double tagged packet
              *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

              vlib_buffer_advance (b0, sizeof (h0[0]));
              vlan_count = 3;   // "unknown" number, aka, 3-or-more
            }
        }
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}
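
/*
 * Illustrative note added by the editor (not part of the original source):
 * for a dot1ad frame 0x88a8 | vlan 100 | 0x8100 | vlan 200 | 0x0800 | IPv4,
 * parse_header() leaves *orig_type = 0x88a8, *outer_id = 100,
 * *inner_id = 200, *type = 0x0800, *match_flags with MATCH_2_TAG set,
 * and b0->current_data pointing at the IPv4 header.
 */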

static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
                                  u64 * dmacs, u8 * dmacs_bad,
                                  u32 n_packets, ethernet_interface_t * ei,
                                  u8 have_sec_dmac);

// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
identify_subint (ethernet_main_t * em,
                 vnet_hw_interface_t * hi,
                 vlib_buffer_t * b0,
                 u32 match_flags,
                 main_intf_t * main_intf,
                 vlan_intf_t * vlan_intf,
                 qinq_intf_t * qinq_intf,
                 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
  u32 matched;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
                                 qinq_intf, new_sw_if_index, error0, is_l2);

  if (matched)
    {
      // Perform L3 my-mac filter
      // A unicast packet arriving on an L3 interface must have a dmac
      // matching the interface mac. If interface has STATUS_L3 bit set
      // mac filter is already done.
      if (!(*is_l2 || (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
        {
          u64 dmacs[2];
          u8 dmacs_bad[2];
          ethernet_header_t *e0;
          ethernet_interface_t *ei0;

          e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
          dmacs[0] = *(u64 *) e0;
          ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);

          if (ei0 && vec_len (ei0->secondary_addrs))
            ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
                                              1 /* n_packets */ , ei0,
                                              1 /* have_sec_dmac */ );
          else
            ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
                                              1 /* n_packets */ , ei0,
                                              0 /* have_sec_dmac */ );
          if (dmacs_bad[0])
            *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
        }

      // Check for down subinterface
      *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
    }
}
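
/*
 * Editor's note (not in the original source): the my-mac filter above loads
 * the first 8 bytes of the frame into a u64; the check helpers mask it with
 * DMAC_MASK (defined further below) so that only the 6-byte destination MAC
 * is compared against the interface's primary and, when present, secondary
 * addresses.
 */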

static_always_inline void
determine_next_node (ethernet_main_t * em,
                     ethernet_input_variant_t variant,
                     u32 is_l20,
                     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
              ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;
    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
        i0 ==
        SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // If variant is variant_ethernet then we came from LLC processing.
      // Don't go back there; drop instead by keeping the drop/bad table
      // result.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
        {
          *next0 = ETHERNET_INPUT_NEXT_LLC;
        }
    }
}


/* following vector code relies on following assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
               STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
               "l3_hdr_offset must follow l2_hdr_offset");

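/*
 * Editor's note (not in the original source): the asserts above pin
 * current_data, current_length and flags into the first 8 bytes of
 * vlib_buffer_t and keep l3_hdr_offset immediately after l2_hdr_offset,
 * which is what lets the AVX2 path below update all of these fields with
 * a few wide gather/scatter operations.
 */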
static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  u64x4 r, radv;

  /* vector of signed 16 bit integers used in signed vector add operation
     to advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}

static_always_inline void
eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;

  if (is_l3)
    vlib_buffer_advance (b[0], adv);
  b[0]->flags |= flags;
  if (!is_l3)
    vnet_buffer (b[0])->l2.l2_len = adv;
}


static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
                              u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}

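/*
 * Editor's note (not in the original source): with CLIB_HAVE_VEC128 the
 * 16-byte load above starts at the source MAC, so u16 lane 3 of the vector
 * is the ethertype and the second u64 lane is the 8 bytes that follow it,
 * i.e. the potential VLAN tag words consumed later by the slowpath lookup.
 */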
static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
             sparse_vec_index (em->l3_next.input_next_by_type, etype));
}

typedef struct
{
  u64 tag, mask;
  u32 sw_if_index;
  u16 type, len, next;
  i16 adv;
  u8 err, n_tags;
  u64 n_packets, n_bytes;
} eth_input_tag_lookup_t;

static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
                              eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}

static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
                      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
                      u64 tag, u16 * next, vlib_buffer_t * b,
                      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
                      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
                                     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
        {
          l->type = clib_net_to_host_u16 (t[3]);
          l->n_tags = 2;
          matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
                                         SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
                                         qif, &new_sw_if_index, &l->err,
                                         &is_l2);
        }
      else
        {
          l->n_tags = 1;
          if (vlan1 == 0)
            {
              new_sw_if_index = hi->sw_if_index;
              l->err = ETHERNET_ERROR_NONE;
              matched = 1;
              is_l2 = main_is_l3 == 0;
            }
          else
            matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
                                           SUBINT_CONFIG_MATCH_1_TAG, mif,
                                           vif, qif, &new_sw_if_index,
                                           &l->err, &is_l2);
        }

      if (l->sw_if_index != new_sw_if_index)
        {
          eth_input_update_if_counters (vm, vnm, l);
          l->n_packets = 0;
          l->n_bytes = 0;
          l->sw_if_index = new_sw_if_index;
        }
      l->tag = tag;
      l->mask = (l->n_tags == 2) ?
        clib_net_to_host_u64 (0xffffffffffffffff) :
        clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
        l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
        l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
        l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
          l->n_tags * sizeof (ethernet_vlan_header_t);
      else
        l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
        l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
        l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
        l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
        l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
        l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
        l->next = em->redirect_l3_next;
      else
        {
          l->next = eth_input_next_by_type (l->type);
          if (l->next == ETHERNET_INPUT_NEXT_PUNT)
            l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
        }
    }

  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}

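/*
 * Editor's note (not in the original source): eth_input_tag_lookup() keeps
 * a one-entry cache in 'l'; the vlan/qinq table walk is redone only when
 * the tag bytes covered by l->mask differ from the previous packet, so a
 * burst of packets carrying identical tags takes the short path.
 */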
#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
is_dmac_bad (u64 dmac, u64 hwaddr)
{
  u64 r0 = dmac & DMAC_MASK;
  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
}

static_always_inline u8
is_sec_dmac_bad (u64 dmac, u64 hwaddr)
{
  return ((dmac & DMAC_MASK) != hwaddr);
}

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  return dmac_bad[0];
}

static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}

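/*
 * Editor's note (not in the original source): DMAC_MASK keeps only the six
 * destination-MAC bytes of the u64 load and DMAC_IGBIT is the I/G (group)
 * bit, so is_dmac_bad() never flags multicast or broadcast frames as a
 * my-mac mismatch; the secondary-address variants only re-check frames
 * already marked bad by the primary-address pass.
 */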
/*
 * DMAC check for ethernet_input_inline()
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
                                  u64 * dmacs, u8 * dmacs_bad,
                                  u32 n_packets, ethernet_interface_t * ei,
                                  u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u8 bad = 0;

  ASSERT (0 == ei->address.zero);

  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      ethernet_interface_address_t *sec_addr;

      vec_foreach (sec_addr, ei->secondary_addrs)
      {
        ASSERT (0 == sec_addr->zero);
        hwaddr = sec_addr->as_u64;

        bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
               eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
                                            dmacs_bad + 1));

        if (!bad)
          return;
      }
    }
}

static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
                                    u64 * dmacs, u8 * dmacs_bad,
                                    u32 n_packets, ethernet_interface_t * ei,
                                    u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

  ASSERT (0 == ei->address.zero);

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      ethernet_interface_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
      {
        u64 hwaddr = addr->as_u64;
        i32 n_left = n_packets;
        u64 *dmac = dmacs;
        u8 *dmac_bad = dmacs_bad;

        ASSERT (0 == addr->zero);

        bad = 0;

        while (n_left > 0)
          {
            int adv = 0;
            int n_bad;

            /* skip any that have already matched */
            if (!dmac_bad[0])
              {
                dmac += 1;
                dmac_bad += 1;
                n_left -= 1;
                continue;
              }

            n_bad = clib_min (4, n_left);

            /* If >= 4 left, compare 4 together */
            if (n_bad == 4)
              {
                bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
                adv = 4;
                n_bad = 0;
              }

            /* handle individually */
            while (n_bad > 0)
              {
                bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
                                                    dmac_bad + adv);
                adv += 1;
                n_bad -= 1;
              }

            dmac += adv;
            dmac_bad += adv;
            n_left -= adv;
          }

        if (!bad)               /* can stop looping if everything matched */
          break;
      }
    }
}

/* process frame of buffers, store ethertype into array and update
   buffer metadata fields depending on interface being l2 or l3 assuming that
   packets are untagged. For tagged packets those fields are updated later.
   Optionally store Destination MAC address and tag data into arrays
   for further processing */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
               "VLIB_FRAME_SIZE must be a multiple of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_hw_interface_t * hi,
                         u32 * buffer_indices, u32 n_packets, int main_is_l3,
                         int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
        eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
                                            ei, 1 /* have_sec_dmac */ );
      else
        eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
                                            ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered slowpath; in l2 mode all untagged packets are
     considered fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
        {
          u16x16 r = zero;
          u16x16 e16 = u16x16_load_unaligned (etype);
          if (main_is_l3)
            {
              r += (e16 == et16_ip4) & next16_ip4;
              r += (e16 == et16_ip6) & next16_ip6;
              r += (e16 == et16_mpls) & next16_mpls;
            }
          else
            r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
          u16x16_store_unaligned (r, next);

          if (!u16x16_is_all_zero (r == zero))
            {
              if (u16x16_is_all_zero (r))
                {
                  u16x16_store_unaligned (u16x16_splat (i) + stairs,
                                          slowpath_indices + n_slowpath);
                  n_slowpath += 16;
                }
              else
                {
                  for (int j = 0; j < 16; j++)
                    if (next[j] == 0)
                      slowpath_indices[n_slowpath++] = i + j;
                }
            }

          etype += 16;
          next += 16;
          n_left -= 16;
          i += 16;
          continue;
        }
#endif
      if (main_is_l3 && etype[0] == et_ip4)
        next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
        next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
        next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
               etype[0] != et_vlan && etype[0] != et_dot1ad)
        next[0] = next_l2;
      else
        {
          next[0] = 0;
          slowpath_indices[n_slowpath++] = i;
        }

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
        .mask = -1LL,
        .tag = tags[si[0]] ^ -1LL,
        .sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
        {
          i = si[0];
          u16 etype = etypes[i];

          if (etype == et_vlan)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
              eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
                                    &dot1q_lookup, dmacs_bad[i], 0,
                                    main_is_l3, dmac_check);
            }
          else if (etype == et_dot1ad)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
              eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
                                    &dot1ad_lookup, dmacs_bad[i], 1,
                                    main_is_l3, dmac_check);
            }
          else
            {
              /* untagged packet with not well known ethertype */
              if (last_unknown_etype != etype)
                {
                  last_unknown_etype = etype;
                  etype = clib_host_to_net_u16 (etype);
                  last_unknown_next = eth_input_next_by_type (etype);
                }
              if (dmac_check && main_is_l3 && dmacs_bad[i])
                {
                  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
                  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
                  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
                }
              else
                nexts[i] = last_unknown_next;
            }

          /* next */
          n_left--;
          si++;
        }

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}

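/*
 * Editor's note (not in the original source): in the classification loops
 * above, nexts[i] == 0 is used as the "needs slowpath" marker; those
 * entries are revisited in the n_slowpath pass, which resolves tagged and
 * unknown-ethertype packets before the whole frame is enqueued at once.
 */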
static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
                      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;

  if (main_is_l3)
    {
      if (int_is_l3 ||          /* DMAC filter already done by NIC */
          ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
        {                       /* All L2 usage - DMAC check not needed */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 1, ip4_cksum_ok, 0);
        }
      else
        {                       /* DMAC check needed for L3 */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 1, ip4_cksum_ok, 1);
        }
      return;
    }
  else
    {
      if (hi->l3_if_count == 0)
        {                       /* All L2 usage - DMAC check not needed */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 0, ip4_cksum_ok, 0);
        }
      else
        {                       /* DMAC check needed for L3 */
          eth_input_process_frame (vm, node, hi, from, n_pkts,
                                   /*is_l3 */ 0, ip4_cksum_ok, 1);
        }
      return;
    }
}

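/*
 * Editor's note (not in the original source): the branches above enable the
 * per-packet DMAC check only when some L3 usage exists on the interface and
 * the NIC has not already filtered on MAC (ETHERNET_INTERFACE_FLAG_STATUS_L3
 * set); interfaces carrying only L2 sub-interfaces skip the check entirely.
 */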
static_always_inline void
ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame)
{
  u32 *from, n_left;
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;

      while (n_left)
        {
          ethernet_input_trace_t *t0;
          vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);

          if (b0->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b0,
                                   sizeof (ethernet_input_trace_t));
              clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
                                sizeof (t0->packet_data));
              t0->frame_flags = from_frame->flags;
              clib_memcpy_fast (&t0->frame_data,
                                vlib_frame_scalar_args (from_frame),
                                sizeof (ethernet_input_frame_t));
            }
          from += 1;
          n_left -= 1;
        }
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
    {
      u32 bi0;
      vnet_pcap_t *pp = &vlib_global_main.pcap;

      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;
      while (n_left > 0)
        {
          int classify_filter_result;
          vlib_buffer_t *b0;
          bi0 = from[0];
          from++;
          n_left--;
          b0 = vlib_get_buffer (vm, bi0);
          if (pp->filter_classify_table_index != ~0)
            {
              classify_filter_result =
                vnet_is_packet_traced_inline
                (b0, pp->filter_classify_table_index, 0 /* full classify */ );
              if (classify_filter_result)
                pcap_add_buffer (&pp->pcap_main, vm, bi0,
                                 pp->max_bytes_per_pkt);
              continue;
            }

          if (pp->pcap_sw_if_index == 0 ||
              pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
            {
              vnet_main_t *vnm = vnet_get_main ();
              vnet_hw_interface_t *hi =
                vnet_get_sup_hw_interface
                (vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);

              /* Capture pkt if not filtered, or if filter hits */
              if (hi->trace_classify_table_index == ~0 ||
                  vnet_is_packet_traced_inline
                  (b0, hi->trace_classify_table_index,
                   0 /* full classify */ ))
                pcap_add_buffer (&pp->pcap_main, vm, bi0,
                                 pp->max_bytes_per_pkt);
            }
        }
    }
}

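/*
 * Editor's note (not in the original source): the rx pcap path above applies
 * the global classify filter first when one is configured, and otherwise
 * falls back to the capture-interface match plus the per-interface trace
 * classify table before handing the buffer to pcap_add_buffer().
 */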
static_always_inline void
ethernet_input_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       u32 * from, u32 n_packets,
                       ethernet_input_variant_t variant)
{
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  vlib_node_runtime_t *error_node;
  u32 n_left_from, next_index, *to_next;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 thread_index = vm->thread_index;
  u32 cached_sw_if_index = ~0;
  u32 cached_is_l2 = 0;         /* shut up gcc */
  vnet_hw_interface_t *hi = NULL;       /* used for main interface only */
  ethernet_interface_t *ei = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
    error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
  else
    error_node = node;

  n_left_from = n_packets;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u8 next0, next1, error0, error1;
          u16 type0, orig_type0, type1, orig_type1;
          u16 outer_id0, inner_id0, outer_id1, inner_id1;
          u32 match_flags0, match_flags1;
          u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
            new_sw_if_index1, len1;
          vnet_hw_interface_t *hi0, *hi1;
          main_intf_t *main_intf0, *main_intf1;
          vlan_intf_t *vlan_intf0, *vlan_intf1;
          qinq_intf_t *qinq_intf0, *qinq_intf1;
          u32 is_l20, is_l21;
          ethernet_header_t *e0, *e1;
          u64 dmacs[2];
          u8 dmacs_bad[2];

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], STORE);
            vlib_prefetch_buffer_header (b[3], STORE);

            CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
            CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;

          error0 = error1 = ETHERNET_ERROR_NONE;
          e0 = vlib_buffer_get_current (b0);
          type0 = clib_net_to_host_u16 (e0->type);
          e1 = vlib_buffer_get_current (b1);
          type1 = clib_net_to_host_u16 (e1->type);

          /* Set the L2 header offset for all packets */
          vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
          vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
          b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
          b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

          /* Speed-path for the untagged case */
          if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
                            && !ethernet_frame_is_any_tagged_x2 (type0,
                                                                 type1)))
            {
              main_intf_t *intf0;
              subint_config_t *subint0;
              u32 sw_if_index0, sw_if_index1;

              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
              is_l20 = cached_is_l2;

              /* This is probably wholly unnecessary */
              if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
                goto slowpath;

              /* Now sw_if_index0 == sw_if_index1 */
              if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
                {
                  cached_sw_if_index = sw_if_index0;
                  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
                  ei = ethernet_get_interface (em, hi->hw_if_index);
                  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
                  subint0 = &intf0->untagged_subint;
                  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
                }

              if (PREDICT_TRUE (is_l20 != 0))
                {
                  vnet_buffer (b0)->l3_hdr_offset =
                    vnet_buffer (b0)->l2_hdr_offset +
                    sizeof (ethernet_header_t);
                  vnet_buffer (b1)->l3_hdr_offset =
                    vnet_buffer (b1)->l2_hdr_offset +
                    sizeof (ethernet_header_t);
                  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
                  b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
                  next0 = em->l2_next;
                  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
                  next1 = em->l2_next;
                  vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
                }
              else
                {
                  if (ei && (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3))
                    goto skip_dmac_check01;

                  dmacs[0] = *(u64 *) e0;
                  dmacs[1] = *(u64 *) e1;

                  if (ei && vec_len (ei->secondary_addrs))
                    ethernet_input_inline_dmac_check (hi, dmacs,
                                                      dmacs_bad,
                                                      2 /* n_packets */ ,
                                                      ei,
                                                      1 /* have_sec_dmac */ );
                  else
                    ethernet_input_inline_dmac_check (hi, dmacs,
                                                      dmacs_bad,
                                                      2 /* n_packets */ ,
                                                      ei,
                                                      0 /* have_sec_dmac */ );

                  if (dmacs_bad[0])
                    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
                  if (dmacs_bad[1])
                    error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;

                skip_dmac_check01:
                  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
                  determine_next_node (em, variant, 0, type0, b0,
                                       &error0, &next0);
                  vlib_buffer_advance (b1, sizeof (ethernet_header_t));
                  determine_next_node (em, variant, 0, type1, b1,
                                       &error1, &next1);
                }
              goto ship_it01;
            }

          /* Slow-path for the tagged case */
        slowpath:
          parse_header (variant,
                        b0,
                        &type0,
                        &orig_type0, &outer_id0, &inner_id0, &match_flags0);

          parse_header (variant,
                        b1,
                        &type1,
                        &orig_type1, &outer_id1, &inner_id1, &match_flags1);

          old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

          eth_vlan_table_lookups (em,
                                  vnm,
                                  old_sw_if_index0,
                                  orig_type0,
                                  outer_id0,
                                  inner_id0,
                                  &hi0,
                                  &main_intf0, &vlan_intf0, &qinq_intf0);

          eth_vlan_table_lookups (em,
                                  vnm,
                                  old_sw_if_index1,
                                  orig_type1,
                                  outer_id1,
                                  inner_id1,
                                  &hi1,
                                  &main_intf1, &vlan_intf1, &qinq_intf1);

          identify_subint (em,
                           hi0,
                           b0,
                           match_flags0,
                           main_intf0,
                           vlan_intf0,
                           qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

          identify_subint (em,
                           hi1,
                           b1,
                           match_flags1,
                           main_intf1,
                           vlan_intf1,
                           qinq_intf1, &new_sw_if_index1, &error1, &is_l21);

          // Save RX sw_if_index for later nodes
          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            error0 !=
            ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] =
            error1 !=
            ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;

          // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
          if (((new_sw_if_index0 != ~0)
               && (new_sw_if_index0 != old_sw_if_index0))
              || ((new_sw_if_index1 != ~0)
                  && (new_sw_if_index1 != old_sw_if_index1)))
            {

              len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
                - vnet_buffer (b0)->l2_hdr_offset;
              len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
                - vnet_buffer (b1)->l2_hdr_offset;

              stats_n_packets += 2;
              stats_n_bytes += len0 + len1;

              if (PREDICT_FALSE
                  (!(new_sw_if_index0 == stats_sw_if_index
                     && new_sw_if_index1 == stats_sw_if_index)))
                {
                  stats_n_packets -= 2;
                  stats_n_bytes -= len0 + len1;

                  if (new_sw_if_index0 != old_sw_if_index0
                      && new_sw_if_index0 != ~0)
                    vlib_increment_combined_counter (vnm->
                                                     interface_main.combined_sw_if_counters
                                                     +
                                                     VNET_INTERFACE_COUNTER_RX,
                                                     thread_index,
                                                     new_sw_if_index0, 1,
1465 len0);
1466 if (new_sw_if_index1 != old_sw_if_index1
1467 && new_sw_if_index1 != ~0)
1468 vlib_increment_combined_counter (vnm->
1469 interface_main.combined_sw_if_counters
1470 +
1471 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001472 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001473 new_sw_if_index1, 1,
1474 len1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001475
1476 if (new_sw_if_index0 == new_sw_if_index1)
1477 {
1478 if (stats_n_packets > 0)
1479 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001480 vlib_increment_combined_counter
1481 (vnm->interface_main.combined_sw_if_counters
1482 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001483 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001484 stats_sw_if_index,
1485 stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001486 stats_n_packets = stats_n_bytes = 0;
1487 }
1488 stats_sw_if_index = new_sw_if_index0;
1489 }
1490 }
1491 }
1492
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001493 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1494 is_l20 = is_l21 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001495
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001496 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1497 &next0);
1498 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1499 &next1);
1500
John Lo1904c472017-03-10 17:15:22 -05001501 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001502 b0->error = error_node->errors[error0];
1503 b1->error = error_node->errors[error1];
1504
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001505 // verify speculative enqueue
1506 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1507 n_left_to_next, bi0, bi1, next0,
1508 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001509 }
1510
1511 while (n_left_from > 0 && n_left_to_next > 0)
1512 {
1513 u32 bi0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001514 vlib_buffer_t *b0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001515 u8 error0, next0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001516 u16 type0, orig_type0;
1517 u16 outer_id0, inner_id0;
1518 u32 match_flags0;
1519 u32 old_sw_if_index0, new_sw_if_index0, len0;
1520 vnet_hw_interface_t *hi0;
1521 main_intf_t *main_intf0;
1522 vlan_intf_t *vlan_intf0;
1523 qinq_intf_t *qinq_intf0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001524 ethernet_header_t *e0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001525 u32 is_l20;
Matthew Smith42bde452019-11-18 09:35:24 -06001526 u64 dmacs[2];
1527 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001528
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001529 // Prefetch next iteration
1530 if (n_left_from > 1)
1531 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001532 vlib_prefetch_buffer_header (b[1], STORE);
1533 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001534 }
1535
1536 bi0 = from[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001537 to_next[0] = bi0;
1538 from += 1;
1539 to_next += 1;
1540 n_left_from -= 1;
1541 n_left_to_next -= 1;
1542
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001543 b0 = b[0];
1544 b += 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001545
1546 error0 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001547 e0 = vlib_buffer_get_current (b0);
1548 type0 = clib_net_to_host_u16 (e0->type);
1549
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001550 /* Set the L2 header offset for all packets */
1551 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1552 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1553
John Locc532852016-12-14 15:42:45 -05001554 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001555 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1556 && !ethernet_frame_is_tagged (type0)))
1557 {
1558 main_intf_t *intf0;
1559 subint_config_t *subint0;
1560 u32 sw_if_index0;
1561
1562 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1563 is_l20 = cached_is_l2;
1564
1565 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1566 {
1567 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001568 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001569 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001570 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001571 subint0 = &intf0->untagged_subint;
1572 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1573 }
John Lo7714b302016-12-20 16:59:02 -05001574
John Lo7714b302016-12-20 16:59:02 -05001575
Dave Barachcfba1e22016-11-16 10:23:50 -05001576 if (PREDICT_TRUE (is_l20 != 0))
1577 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001578 vnet_buffer (b0)->l3_hdr_offset =
1579 vnet_buffer (b0)->l2_hdr_offset +
1580 sizeof (ethernet_header_t);
1581 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001582 next0 = em->l2_next;
1583 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001584 }
John Locc532852016-12-14 15:42:45 -05001585 else
1586 {
Dave Barach99c6dc62021-02-15 12:46:47 -05001587 if (ei && ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
John Lo4a302ee2020-05-12 22:34:39 -04001588 goto skip_dmac_check0;
1589
Matthew Smith42bde452019-11-18 09:35:24 -06001590 dmacs[0] = *(u64 *) e0;
1591
1592 if (ei && vec_len (ei->secondary_addrs))
1593 ethernet_input_inline_dmac_check (hi, dmacs,
1594 dmacs_bad,
1595 1 /* n_packets */ ,
1596 ei,
1597 1 /* have_sec_dmac */ );
1598 else
1599 ethernet_input_inline_dmac_check (hi, dmacs,
1600 dmacs_bad,
1601 1 /* n_packets */ ,
1602 ei,
1603 0 /* have_sec_dmac */ );
1604
1605 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001606 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001607
John Lo4a302ee2020-05-12 22:34:39 -04001608 skip_dmac_check0:
Andrew Yourtchenkoe78bca12018-10-10 16:15:55 +02001609 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001610 determine_next_node (em, variant, 0, type0, b0,
1611 &error0, &next0);
John Locc532852016-12-14 15:42:45 -05001612 }
1613 goto ship_it0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001614 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001615
John Locc532852016-12-14 15:42:45 -05001616 /* Slow-path for the tagged case */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001617 parse_header (variant,
1618 b0,
1619 &type0,
1620 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001621
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001622 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001623
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001624 eth_vlan_table_lookups (em,
1625 vnm,
1626 old_sw_if_index0,
1627 orig_type0,
1628 outer_id0,
1629 inner_id0,
1630 &hi0,
1631 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001632
Ivan Shvedunov72869432020-10-15 13:19:35 +03001633 identify_subint (em,
1634 hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001635 b0,
1636 match_flags0,
1637 main_intf0,
1638 vlan_intf0,
1639 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001640
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001641 // Save RX sw_if_index for later nodes
1642 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1643 error0 !=
1644 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001645
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001646 // Increment subinterface stats
1647 // Note that interface-level counters have already been incremented
1648 // prior to calling this function. Thus only subinterface counters
1649 // are incremented here.
1650 //
Damjan Marion607de1a2016-08-16 22:53:54 +02001651 // Interface level counters include packets received on the main
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001652 // interface and all subinterfaces. Subinterface level counters
1653 // include only those packets received on that subinterface
Ed Warnickecb9cada2015-12-08 15:45:58 -07001654 // Increment stats if the subint is valid and it is not the main intf
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001655 if ((new_sw_if_index0 != ~0)
1656 && (new_sw_if_index0 != old_sw_if_index0))
1657 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001658
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001659 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001660 - vnet_buffer (b0)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001661
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001662 stats_n_packets += 1;
1663 stats_n_bytes += len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001664
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001665 // Batch stat increments from the same subinterface so counters
Damjan Marion607de1a2016-08-16 22:53:54 +02001666 // don't need to be incremented for every packet.
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001667 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1668 {
1669 stats_n_packets -= 1;
1670 stats_n_bytes -= len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001671
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001672 if (new_sw_if_index0 != ~0)
1673 vlib_increment_combined_counter
1674 (vnm->interface_main.combined_sw_if_counters
1675 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001676 thread_index, new_sw_if_index0, 1, len0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001677 if (stats_n_packets > 0)
1678 {
1679 vlib_increment_combined_counter
1680 (vnm->interface_main.combined_sw_if_counters
1681 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001682 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001683 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1684 stats_n_packets = stats_n_bytes = 0;
1685 }
1686 stats_sw_if_index = new_sw_if_index0;
1687 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001688 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001689
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001690 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1691 is_l20 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001692
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001693 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1694 &next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001695
John Lo1904c472017-03-10 17:15:22 -05001696 ship_it0:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001697 b0->error = error_node->errors[error0];
1698
1699 // verify speculative enqueue
Ed Warnickecb9cada2015-12-08 15:45:58 -07001700 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1701 to_next, n_left_to_next,
1702 bi0, next0);
1703 }
1704
1705 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1706 }
1707
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001708 // Increment any remaining batched stats
1709 if (stats_n_packets > 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001710 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001711 vlib_increment_combined_counter
1712 (vnm->interface_main.combined_sw_if_counters
1713 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001714 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001715 node->runtime_data[0] = stats_sw_if_index;
1716 }
Damjan Marion650223c2018-11-14 16:55:53 +01001717}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001718
Damjan Marion5beecec2018-09-10 13:09:21 +02001719VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1720 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001721 vlib_frame_t * frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001722{
Damjan Marion650223c2018-11-14 16:55:53 +01001723 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001724 u32 *from = vlib_frame_vector_args (frame);
1725 u32 n_packets = frame->n_vectors;
1726
1727 ethernet_input_trace (vm, node, frame);
1728
1729 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1730 {
Damjan Marion650223c2018-11-14 16:55:53 +01001731 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
Damjan Marion650223c2018-11-14 16:55:53 +01001732 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001733 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1734 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
Damjan Marion650223c2018-11-14 16:55:53 +01001735 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001736 else
1737 ethernet_input_inline (vm, node, from, n_packets,
1738 ETHERNET_INPUT_VARIANT_ETHERNET);
Damjan Marion650223c2018-11-14 16:55:53 +01001739 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001740}
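/*
 * Illustrative sketch (an assumption, not code from this file): a device
 * input node that knows every packet in a frame arrived on one interface
 * can take the eth_input_single_int() path above by filling in the frame's
 * scalar arguments before enqueuing to ethernet-input, roughly
 *
 *   ethernet_input_frame_t *ef = vlib_frame_scalar_args (f);
 *   ef->sw_if_index = sw_if_index;
 *   ef->hw_if_index = hw_if_index;
 *   f->flags |= ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
 *
 * Variable names here are placeholders; exact driver code varies.
 */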
Ed Warnickecb9cada2015-12-08 15:45:58 -07001741
Damjan Marion5beecec2018-09-10 13:09:21 +02001742VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1743 vlib_node_runtime_t * node,
1744 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001745{
Damjan Marion650223c2018-11-14 16:55:53 +01001746 u32 *from = vlib_frame_vector_args (from_frame);
1747 u32 n_packets = from_frame->n_vectors;
1748 ethernet_input_trace (vm, node, from_frame);
1749 ethernet_input_inline (vm, node, from, n_packets,
1750 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1751 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001752}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001753
Damjan Marion5beecec2018-09-10 13:09:21 +02001754VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1755 vlib_node_runtime_t * node,
1756 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001757{
Damjan Marion650223c2018-11-14 16:55:53 +01001758 u32 *from = vlib_frame_vector_args (from_frame);
1759 u32 n_packets = from_frame->n_vectors;
1760 ethernet_input_trace (vm, node, from_frame);
1761 ethernet_input_inline (vm, node, from, n_packets,
1762 ETHERNET_INPUT_VARIANT_NOT_L2);
1763 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001764}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001765
1766
1767// Return the subinterface config struct for the given sw_if_index
1768// Also return via parameter the appropriate match flags for the
1769// configured number of tags.
1770// On error (unsupported or not ethernet) return 0.
1771static subint_config_t *
1772ethernet_sw_interface_get_config (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001773 u32 sw_if_index,
1774 u32 * flags, u32 * unsupported)
1775{
1776 ethernet_main_t *em = &ethernet_main;
1777 vnet_hw_interface_t *hi;
1778 vnet_sw_interface_t *si;
1779 main_intf_t *main_intf;
1780 vlan_table_t *vlan_table;
1781 qinq_table_t *qinq_table;
1782 subint_config_t *subint = 0;
1783
Ed Warnickecb9cada2015-12-08 15:45:58 -07001784 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1785
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001786 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1787 {
1788 *unsupported = 0;
1789 goto done; // non-ethernet interface
1790 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001791
1792 // ensure there's an entry for the main intf (shouldn't really be necessary)
1793 vec_validate (em->main_intfs, hi->hw_if_index);
1794 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1795
1796 // Locate the subint for the given ethernet config
1797 si = vnet_get_sw_interface (vnm, sw_if_index);
1798
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001799 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1800 {
1801 p2p_ethernet_main_t *p2pm = &p2p_main;
1802 u32 p2pe_sw_if_index =
1803 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1804 if (p2pe_sw_if_index == ~0)
1805 {
1806 pool_get (p2pm->p2p_subif_pool, subint);
1807 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1808 }
1809 else
1810 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1811 *flags = SUBINT_CONFIG_P2P;
1812 }
Neale Ranns17ff3c12018-07-04 10:24:24 -07001813 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1814 {
1815 pipe_t *pipe;
1816
1817 pipe = pipe_get (sw_if_index);
1818 subint = &pipe->subint;
1819 *flags = SUBINT_CONFIG_P2P;
1820 }
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001821 else if (si->sub.eth.flags.default_sub)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001822 {
1823 subint = &main_intf->default_subint;
Mike Bly88076742018-09-24 10:13:06 -07001824 *flags = SUBINT_CONFIG_MATCH_1_TAG |
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001825 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1826 }
1827 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1828 {
1829 // if no flags are set then this is a main interface
1830 // so treat as untagged
1831 subint = &main_intf->untagged_subint;
1832 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1833 }
1834 else
1835 {
1836 // one or two tags
1837 // first get the vlan table
1838 if (si->sub.eth.flags.dot1ad)
1839 {
1840 if (main_intf->dot1ad_vlans == 0)
1841 {
1842 // Allocate a vlan table from the pool
1843 pool_get (em->vlan_pool, vlan_table);
1844 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1845 }
1846 else
1847 {
1848 // Get ptr to existing vlan table
1849 vlan_table =
1850 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1851 }
1852 }
1853 else
1854 { // dot1q
1855 if (main_intf->dot1q_vlans == 0)
1856 {
1857 // Allocate a vlan table from the pool
1858 pool_get (em->vlan_pool, vlan_table);
1859 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1860 }
1861 else
1862 {
1863 // Get ptr to existing vlan table
1864 vlan_table =
1865 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
1866 }
1867 }
1868
1869 if (si->sub.eth.flags.one_tag)
1870 {
1871 *flags = si->sub.eth.flags.exact_match ?
1872 SUBINT_CONFIG_MATCH_1_TAG :
1873 (SUBINT_CONFIG_MATCH_1_TAG |
1874 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1875
1876 if (si->sub.eth.flags.outer_vlan_id_any)
1877 {
1878 // not implemented yet
1879 *unsupported = 1;
1880 goto done;
1881 }
1882 else
1883 {
1884 // a single vlan, a common case
1885 subint =
1886 &vlan_table->vlans[si->sub.eth.
1887 outer_vlan_id].single_tag_subint;
1888 }
1889
1890 }
1891 else
1892 {
1893 // Two tags
1894 *flags = si->sub.eth.flags.exact_match ?
1895 SUBINT_CONFIG_MATCH_2_TAG :
1896 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1897
1898 if (si->sub.eth.flags.outer_vlan_id_any
1899 && si->sub.eth.flags.inner_vlan_id_any)
1900 {
1901 // not implemented yet
1902 *unsupported = 1;
1903 goto done;
1904 }
1905
1906 if (si->sub.eth.flags.inner_vlan_id_any)
1907 {
1908 // a specific outer and "any" inner
1909 // don't need a qinq table for this
1910 subint =
1911 &vlan_table->vlans[si->sub.eth.
1912 outer_vlan_id].inner_any_subint;
1913 if (si->sub.eth.flags.exact_match)
1914 {
1915 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1916 }
1917 else
1918 {
1919 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1920 SUBINT_CONFIG_MATCH_3_TAG;
1921 }
1922 }
1923 else
1924 {
1925	      // a specific outer + specific inner vlan id, a common case
1926
1927 // get the qinq table
1928 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1929 {
1930 // Allocate a qinq table from the pool
1931 pool_get (em->qinq_pool, qinq_table);
1932 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1933 qinq_table - em->qinq_pool;
1934 }
1935 else
1936 {
1937 // Get ptr to existing qinq table
1938 qinq_table =
1939 vec_elt_at_index (em->qinq_pool,
1940 vlan_table->vlans[si->sub.
1941 eth.outer_vlan_id].
1942 qinqs);
1943 }
1944 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1945 }
1946 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001947 }
1948
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001949done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001950 return subint;
1951}
1952
Damjan Marion5beecec2018-09-10 13:09:21 +02001953static clib_error_t *
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001954ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001955{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001956 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001957 u32 placeholder_flags;
1958 u32 placeholder_unsup;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001959 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001960
1961 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001962 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001963 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1964 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001965
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001966 if (subint == 0)
1967 {
1968 // not implemented yet or not ethernet
1969 goto done;
1970 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001971
1972 subint->sw_if_index =
1973 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1974
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001975done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001976 return error;
1977}
1978
1979VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1980
1981
Damjan Marion5beecec2018-09-10 13:09:21 +02001982#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -07001983// Set the L2/L3 mode for the subinterface
1984void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001985ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001986{
1987 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001988 u32 placeholder_flags;
1989 u32 placeholder_unsup;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001990 int is_port;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001991 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001992
1993 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1994
1995 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001996 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001997 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1998 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001999
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002000 if (subint == 0)
2001 {
2002 // unimplemented or not ethernet
2003 goto done;
2004 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002005
2006 // Double check that the config we found is for our interface (or the interface is down)
2007 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2008
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002009 if (l2)
2010 {
2011 subint->flags |= SUBINT_CONFIG_L2;
2012 if (is_port)
2013 subint->flags |=
2014 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
2015 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
2016 }
2017 else
2018 {
2019 subint->flags &= ~SUBINT_CONFIG_L2;
2020 if (is_port)
2021 subint->flags &=
2022 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
2023 | SUBINT_CONFIG_MATCH_3_TAG);
2024 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002025
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002026done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002027 return;
2028}
2029
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002030/*
2031 * Set the L2/L3 mode for the subinterface regardless of port
2032 */
2033void
2034ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002035 u32 sw_if_index, u32 l2)
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002036{
2037 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04002038 u32 placeholder_flags;
2039 u32 placeholder_unsup;
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002040
2041 /* Find the config for this subinterface */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002042 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04002043 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
2044 &placeholder_unsup);
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002045
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002046 if (subint == 0)
2047 {
2048 /* unimplemented or not ethernet */
2049 goto done;
2050 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002051
2052 /*
2053 * Double check that the config we found is for our interface (or the
2054 * interface is down)
2055 */
2056 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2057
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002058 if (l2)
2059 {
2060 subint->flags |= SUBINT_CONFIG_L2;
2061 }
2062 else
2063 {
2064 subint->flags &= ~SUBINT_CONFIG_L2;
2065 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002066
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002067done:
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002068 return;
2069}
Damjan Marion5beecec2018-09-10 13:09:21 +02002070#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07002071
2072static clib_error_t *
2073ethernet_sw_interface_add_del (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002074 u32 sw_if_index, u32 is_create)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002075{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002076 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002077 subint_config_t *subint;
2078 u32 match_flags;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002079 u32 unsupported = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002080
2081 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002082 subint =
2083 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
2084 &unsupported);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002085
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002086 if (subint == 0)
2087 {
2088 // not implemented yet or not ethernet
2089 if (unsupported)
2090 {
Damjan Marion607de1a2016-08-16 22:53:54 +02002091 // this is the NYI case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002092 error = clib_error_return (0, "not implemented yet");
2093 }
2094 goto done;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002095 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002096
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002097 if (!is_create)
2098 {
2099 subint->flags = 0;
2100 return error;
2101 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002102
2103 // Initialize the subint
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002104 if (subint->flags & SUBINT_CONFIG_VALID)
2105 {
2106 // Error vlan already in use
2107 error = clib_error_return (0, "vlan is already in use");
2108 }
2109 else
2110 {
Neale Ranns17ff3c12018-07-04 10:24:24 -07002111 // Note that config is L3 by default
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002112 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2113 subint->sw_if_index = ~0; // because interfaces are initially down
2114 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002115
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002116done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002117 return error;
2118}
2119
2120VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2121
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002122static char *ethernet_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002123#define ethernet_error(n,c,s) s,
2124#include "error.def"
2125#undef ethernet_error
2126};
2127
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002128/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002129VLIB_REGISTER_NODE (ethernet_input_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002130 .name = "ethernet-input",
2131 /* Takes a vector of packets. */
2132 .vector_size = sizeof (u32),
Damjan Marion650223c2018-11-14 16:55:53 +01002133 .scalar_size = sizeof (ethernet_input_frame_t),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002134 .n_errors = ETHERNET_N_ERROR,
2135 .error_strings = ethernet_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002136 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2137 .next_nodes = {
2138#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2139 foreach_ethernet_input_next
2140#undef _
2141 },
Ed Warnickecb9cada2015-12-08 15:45:58 -07002142 .format_buffer = format_ethernet_header_with_length,
2143 .format_trace = format_ethernet_input_trace,
2144 .unformat_buffer = unformat_ethernet_header,
2145};
2146
Damjan Marion5beecec2018-09-10 13:09:21 +02002147VLIB_REGISTER_NODE (ethernet_input_type_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002148 .name = "ethernet-input-type",
2149 /* Takes a vector of packets. */
2150 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002151 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2152 .next_nodes = {
2153#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2154 foreach_ethernet_input_next
2155#undef _
2156 },
2157};
2158
Damjan Marion5beecec2018-09-10 13:09:21 +02002159VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002160 .name = "ethernet-input-not-l2",
2161 /* Takes a vector of packets. */
2162 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002163 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2164 .next_nodes = {
2165#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2166 foreach_ethernet_input_next
2167#undef _
2168 },
2169};
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002170/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002171
Damjan Marion5beecec2018-09-10 13:09:21 +02002172#ifndef CLIB_MARCH_VARIANT
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002173void
2174ethernet_set_rx_redirect (vnet_main_t * vnm,
2175 vnet_hw_interface_t * hi, u32 enable)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002176{
2177  // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
2178 // don't go directly to ip4-input)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002179 vnet_hw_interface_rx_redirect_to_node
2180 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002181}
2182
2183
2184/*
2185 * Initialization and registration for the next_by_ethernet structure
2186 */
2187
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002188clib_error_t *
2189next_by_ethertype_init (next_by_ethertype_t * l3_next)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002190{
2191 l3_next->input_next_by_type = sparse_vec_new
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002192 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002193 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2194
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002195 vec_validate (l3_next->sparse_index_by_input_next_index,
2196 ETHERNET_INPUT_NEXT_DROP);
2197 vec_validate (l3_next->sparse_index_by_input_next_index,
2198 ETHERNET_INPUT_NEXT_PUNT);
2199 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2200 SPARSE_VEC_INVALID_INDEX;
2201 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2202 SPARSE_VEC_INVALID_INDEX;
2203
Damjan Marion607de1a2016-08-16 22:53:54 +02002204 /*
2205 * Make sure we don't wipe out an ethernet registration by mistake
Dave Barach1f49ed62016-02-24 11:29:06 -05002206 * Can happen if init function ordering constraints are missing.
2207 */
2208 if (CLIB_DEBUG > 0)
2209 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002210 ethernet_main_t *em = &ethernet_main;
2211 ASSERT (em->next_by_ethertype_register_called == 0);
Dave Barach1f49ed62016-02-24 11:29:06 -05002212 }
2213
Ed Warnickecb9cada2015-12-08 15:45:58 -07002214 return 0;
2215}
2216
2217// Add an ethertype -> next index mapping to the structure
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002218clib_error_t *
2219next_by_ethertype_register (next_by_ethertype_t * l3_next,
2220 u32 ethertype, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002221{
2222 u32 i;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002223 u16 *n;
2224 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002225
Dave Barach1f49ed62016-02-24 11:29:06 -05002226 if (CLIB_DEBUG > 0)
2227 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002228 ethernet_main_t *em = &ethernet_main;
Dave Barach1f49ed62016-02-24 11:29:06 -05002229 em->next_by_ethertype_register_called = 1;
2230 }
2231
Ed Warnickecb9cada2015-12-08 15:45:58 -07002232 /* Setup ethernet type -> next index sparse vector mapping. */
2233 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2234 n[0] = next_index;
2235
2236 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2237 is updated. */
2238 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2239 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002240 l3_next->
2241 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002242
2243  // do not allow the cached next indices to be updated if L3
2244 // redirect is enabled, as it will have overwritten them
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002245 if (!em->redirect_l3)
2246 {
2247 // Cache common ethertypes directly
2248 if (ethertype == ETHERNET_TYPE_IP4)
2249 {
2250 l3_next->input_next_ip4 = next_index;
2251 }
2252 else if (ethertype == ETHERNET_TYPE_IP6)
2253 {
2254 l3_next->input_next_ip6 = next_index;
2255 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002256 else if (ethertype == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002257 {
2258 l3_next->input_next_mpls = next_index;
2259 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002260 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002261 return 0;
2262}
2263
Dave Barachf8d50682019-05-14 18:01:44 -04002264void
2265ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002266{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002267 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2268 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002269
2270 ethernet_setup_node (vm, ethernet_input_node.index);
2271 ethernet_setup_node (vm, ethernet_input_type_node.index);
2272 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2273
2274 next_by_ethertype_init (&em->l3_next);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002275
Ed Warnickecb9cada2015-12-08 15:45:58 -07002276 // Initialize pools and vector for vlan parsing
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002277 vec_validate (em->main_intfs, 10); // 10 main interfaces
2278 pool_alloc (em->vlan_pool, 10);
2279 pool_alloc (em->qinq_pool, 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002280
2281 // The first vlan pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002282 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002283 // The first qinq pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002284 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002285}
2286
Ed Warnickecb9cada2015-12-08 15:45:58 -07002287void
2288ethernet_register_input_type (vlib_main_t * vm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002289 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002290{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002291 ethernet_main_t *em = &ethernet_main;
2292 ethernet_type_info_t *ti;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002293 u32 i;
2294
2295 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002296 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002297 if (error)
2298 clib_error_report (error);
2299 }
2300
2301 ti = ethernet_get_type_info (em, type);
Dave Barach4bda2d92019-07-03 15:21:50 -04002302 if (ti == 0)
2303 {
2304 clib_warning ("type_info NULL for type %d", type);
2305 return;
2306 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002307 ti->node_index = node_index;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002308 ti->next_index = vlib_node_add_next (vm,
2309 ethernet_input_node.index, node_index);
2310 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002311 ASSERT (i == ti->next_index);
2312
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002313 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002314 ASSERT (i == ti->next_index);
2315
2316 // Add the L3 node for this ethertype to the next nodes structure
2317 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2318
2319 // Call the registration functions for other nodes that want a mapping
2320 l2bvi_register_input_type (vm, type, node_index);
2321}
2322
2323void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002324ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002325{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002326 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002327 u32 i;
2328
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002329 em->l2_next =
2330 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002331
Damjan Marion607de1a2016-08-16 22:53:54 +02002332 /*
Ed Warnickecb9cada2015-12-08 15:45:58 -07002333 * Even if we never use these arcs, we have to align the next indices...
2334 */
2335 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2336
2337 ASSERT (i == em->l2_next);
2338
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002339 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002340 ASSERT (i == em->l2_next);
2341}
2342
2343// Register a next node for L3 redirect, and enable L3 redirect
2344void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002345ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002346{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002347 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002348 u32 i;
2349
2350 em->redirect_l3 = 1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002351 em->redirect_l3_next = vlib_node_add_next (vm,
2352 ethernet_input_node.index,
2353 node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002354 /*
2355 * Change the cached next nodes to the redirect node
2356 */
2357 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2358 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2359 em->l3_next.input_next_mpls = em->redirect_l3_next;
2360
2361 /*
2362 * Even if we never use these arcs, we have to align the next indices...
2363 */
2364 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2365
2366 ASSERT (i == em->redirect_l3_next);
jerryianff82ed62016-12-05 17:13:00 +08002367
2368 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2369
2370 ASSERT (i == em->redirect_l3_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002371}
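/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * feature that must see every L3 packet before ip4/ip6/mpls input can
 * install itself with
 *
 *   ethernet_register_l3_redirect (vm, my_redirect_node.index);
 *
 * after which the cached IP4/IP6/MPLS next indices above all point at the
 * redirect node. "my_redirect_node" is hypothetical, for illustration only.
 */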
Damjan Marion5beecec2018-09-10 13:09:21 +02002372#endif
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002373
2374/*
2375 * fd.io coding-style-patch-verification: ON
2376 *
2377 * Local Variables:
2378 * eval: (c-set-style "gnu")
2379 * End:
2380 */