/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ethernet_node.c: ethernet packet processing
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/p2p_ethernet.h>
#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/classify/trace_classify.h>

#define foreach_ethernet_input_next            \
  _ (PUNT, "error-punt")                       \
  _ (DROP, "error-drop")                       \
  _ (LLC, "llc-input")                         \
  _ (IP4_INPUT, "ip4-input")                   \
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;

typedef struct
{
  u8 packet_data[32];
  u16 frame_flags;
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;

static u8 *
format_ethernet_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
  u32 indent = format_get_indent (s);

  if (t->frame_flags)
    {
      s = format (s, "frame: flags 0x%x", t->frame_flags);
      if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
	s = format (s, ", hw-if-index %u, sw-if-index %u",
		    t->frame_data.hw_if_index, t->frame_data.sw_if_index);
      s = format (s, "\n%U", format_white_space, indent);
    }
  s = format (s, "%U", format_ethernet_header, t->packet_data);

  return s;
}

extern vlib_node_registration_t ethernet_input_node;

typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;


// Parse the ethernet header to extract vlan tags and innermost ethertype
static_always_inline void
parse_header (ethernet_input_variant_t variant,
	      vlib_buffer_t * b0,
	      u16 * type,
	      u16 * orig_type,
	      u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = vlib_buffer_get_current (b0);

      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing
      u16 *e0;

      e0 = vlib_buffer_get_current (b0);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = vlib_buffer_get_current (b0);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      if (0 == *outer_id)
	*match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
	{
	  // Double tagged packet
	  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

	  h0 = vlib_buffer_get_current (b0);

	  tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

	  *inner_id = tag & 0xfff;

	  *type = clib_net_to_host_u16 (h0->type);

	  vlib_buffer_advance (b0, sizeof (h0[0]));
	  vlan_count = 2;
	  if (*type == ETHERNET_TYPE_VLAN)
	    {
	      // More than double tagged packet
	      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

	      vlib_buffer_advance (b0, sizeof (h0[0]));
	      vlan_count = 3;	// "unknown" number, aka, 3-or-more
	    }
	}
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}

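// Forward declaration: DMAC filter helper shared by identify_subint() and
// the dual/single-packet loops in ethernet_input_inline() further below.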
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac);

// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
identify_subint (vnet_hw_interface_t * hi,
		 vlib_buffer_t * b0,
		 u32 match_flags,
		 main_intf_t * main_intf,
		 vlan_intf_t * vlan_intf,
		 qinq_intf_t * qinq_intf,
		 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
  u32 matched;

  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
				 qinq_intf, new_sw_if_index, error0, is_l2);

  if (matched)
    {
      // Perform L3 my-mac filter
      // A unicast packet arriving on an L3 interface must have a dmac
      // matching the interface mac. If the interface has the STATUS_L3 bit
      // set, the mac filter has already been done.
      if (!(*is_l2 || (hi->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
	{
	  u64 dmacs[2];
	  u8 dmacs_bad[2];
	  ethernet_header_t *e0;
	  ethernet_interface_t *ei0;

	  e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
	  dmacs[0] = *(u64 *) e0;
	  ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);

	  if (ei0 && vec_len (ei0->secondary_addrs))
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      1 /* have_sec_dmac */ );
	  else
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      0 /* have_sec_dmac */ );
	  if (dmacs_bad[0])
	    *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	}

      // Check for down subinterface
      *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
    }
}

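// Map a slow-path packet to its next node: drop on error, the configured
// L2 path for L2 subinterfaces, the common IP4/IP6/MPLS nexts, or the
// sparse per-ethertype table for everything else.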
static_always_inline void
determine_next_node (ethernet_main_t * em,
		     ethernet_input_variant_t variant,
		     u32 is_l20,
		     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
	      ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;

    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
	i0 ==
	SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // Only the plain ethernet variant goes to llc-input; the ethernet-type
      // variant already came from LLC processing, so don't go back there -
      // keep the drop/bad table result instead.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
	{
	  *next0 = ETHERNET_INPUT_NEXT_LLC;
	}
    }
}


/* following vector code relies on following assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
	       STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
	       "l3_hdr_offset must follow l2_hdr_offset");

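/* Set the l2/l3 header offsets and the offset-valid flags for four buffers
   at a time; in L3 mode also advance past the ethernet header, in L2 mode
   record the L2 header length instead. */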
static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  u64x4 r, radv;

  /* vector of signed 16 bit integers used in signed vector add operation
     to advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}

static_always_inline void
eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;

  if (is_l3)
    vlib_buffer_advance (b[0], adv);
  b[0]->flags |= flags;
  if (!is_l3)
    vnet_buffer (b[0])->l2.l2_len = adv;
}

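/* Load the ethertype, the 8 bytes following it (candidate VLAN tag data)
   and, when requested, the first 8 bytes of the header (used for the DMAC
   check) of buffer b[offset] into the per-frame arrays. */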
static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
			      u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}

static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
	     sparse_vec_index (em->l3_next.input_next_by_type, etype));
}

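/* Cached result of the most recent VLAN tag lookup; eth_input_tag_lookup()
   only redoes the subinterface lookup when the tag no longer matches under
   the stored mask, and accumulates per-sw_if_index rx counters here. */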
typedef struct
{
  u64 tag, mask;
  u32 sw_if_index;
  u16 type, len, next;
  i16 adv;
  u8 err, n_tags;
  u64 n_packets, n_bytes;
} eth_input_tag_lookup_t;

static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
			      eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}

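/* Slow path for tagged packets: parse one or two VLAN tags, identify the
   subinterface, then set next node, error, buffer advance and counters,
   reusing the cached lookup 'l' when the same tag repeats. */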
static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
		      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
		      u64 tag, u16 * next, vlib_buffer_t * b,
		      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
		      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
				     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
	{
	  l->type = clib_net_to_host_u16 (t[3]);
	  l->n_tags = 2;
	  matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
					 qif, &new_sw_if_index, &l->err,
					 &is_l2);
	}
      else
	{
	  l->n_tags = 1;
	  if (vlan1 == 0)
	    {
	      new_sw_if_index = hi->sw_if_index;
	      l->err = ETHERNET_ERROR_NONE;
	      matched = 1;
	      is_l2 = main_is_l3 == 0;
	    }
	  else
	    matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					   SUBINT_CONFIG_MATCH_1_TAG, mif,
					   vif, qif, &new_sw_if_index,
					   &l->err, &is_l2);
	}

      if (l->sw_if_index != new_sw_if_index)
	{
	  eth_input_update_if_counters (vm, vnm, l);
	  l->n_packets = 0;
	  l->n_bytes = 0;
	  l->sw_if_index = new_sw_if_index;
	}
      l->tag = tag;
      l->mask = (l->n_tags == 2) ?
	clib_net_to_host_u64 (0xffffffffffffffff) :
	clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
	l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
	l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
	l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
	  l->n_tags * sizeof (ethernet_vlan_header_t);
      else
	l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
	l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
	l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
	l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
	l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
	l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
	l->next = em->redirect_l3_next;
      else
	{
	  l->next = eth_input_next_by_type (l->type);
	  if (l->next == ETHERNET_INPUT_NEXT_PUNT)
	    l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
	}
    }

  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}

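/* DMAC filter helpers: the primary check flags a packet as bad when its
   destination MAC is unicast (IG bit clear) and does not match the
   interface address; the secondary-address checks only compare addresses. */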
#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
is_dmac_bad (u64 dmac, u64 hwaddr)
{
  u64 r0 = dmac & DMAC_MASK;
  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
}

static_always_inline u8
is_sec_dmac_bad (u64 dmac, u64 hwaddr)
{
  return ((dmac & DMAC_MASK) != hwaddr);
}

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  return dmac_bad[0];
}

static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}

/*
 * DMAC check for ethernet_input_inline()
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac)
{
  u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
  u8 bad = 0;

  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      mac_address_t *sec_addr;

      vec_foreach (sec_addr, ei->secondary_addrs)
      {
	hwaddr = (*(u64 *) sec_addr) & DMAC_MASK;

	bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
	       eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
					    dmacs_bad + 1));

	if (!bad)
	  return;
      }
    }
}

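/* Frame-wide DMAC check: mark mismatching packets in dmacs_bad, then give
   packets that failed a second chance against any secondary MAC addresses
   configured on the interface. */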
static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
				    u64 * dmacs, u8 * dmacs_bad,
				    u32 n_packets, ethernet_interface_t * ei,
				    u8 have_sec_dmac)
{
  u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      mac_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
      {
	u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
	i32 n_left = n_packets;
	u64 *dmac = dmacs;
	u8 *dmac_bad = dmacs_bad;

	bad = 0;

	while (n_left > 0)
	  {
	    int adv = 0;
	    int n_bad;

	    /* skip any that have already matched */
	    if (!dmac_bad[0])
	      {
		dmac += 1;
		dmac_bad += 1;
		n_left -= 1;
		continue;
	      }

	    n_bad = clib_min (4, n_left);

	    /* If >= 4 left, compare 4 together */
	    if (n_bad == 4)
	      {
		bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
		adv = 4;
		n_bad = 0;
	      }

	    /* handle individually */
	    while (n_bad > 0)
	      {
		bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
						    dmac_bad + adv);
		adv += 1;
		n_bad -= 1;
	      }

	    dmac += adv;
	    dmac_bad += adv;
	    n_left -= adv;
	  }

	if (!bad)		/* can stop looping if everything matched */
	  break;
      }
    }
}

/* process frame of buffers, store ethertype into array and update
   buffer metadata fields depending on interface being l2 or l3 assuming that
   packets are untagged. For tagged packets those fields are updated later.
   Optionally store destination MAC address and tag data into arrays
   for further processing */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
	       "VLIB_FRAME_SIZE must be a multiple of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_hw_interface_t * hi,
			 u32 * buffer_indices, u32 n_packets, int main_is_l3,
			 int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 1 /* have_sec_dmac */ );
      else
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered as slowpath, in l2 mode all untagged packets are
     considered as fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
	{
	  u16x16 r = zero;
	  u16x16 e16 = u16x16_load_unaligned (etype);
	  if (main_is_l3)
	    {
	      r += (e16 == et16_ip4) & next16_ip4;
	      r += (e16 == et16_ip6) & next16_ip6;
	      r += (e16 == et16_mpls) & next16_mpls;
	    }
	  else
	    r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
	  u16x16_store_unaligned (r, next);

	  if (!u16x16_is_all_zero (r == zero))
	    {
	      if (u16x16_is_all_zero (r))
		{
		  u16x16_store_unaligned (u16x16_splat (i) + stairs,
					  slowpath_indices + n_slowpath);
		  n_slowpath += 16;
		}
	      else
		{
		  for (int j = 0; j < 16; j++)
		    if (next[j] == 0)
		      slowpath_indices[n_slowpath++] = i + j;
		}
	    }

	  etype += 16;
	  next += 16;
	  n_left -= 16;
	  i += 16;
	  continue;
	}
#endif
      if (main_is_l3 && etype[0] == et_ip4)
	next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
	next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
	next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
	       etype[0] != et_vlan && etype[0] != et_dot1ad)
	next[0] = next_l2;
      else
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
	.mask = -1LL,
	.tag = tags[si[0]] ^ -1LL,
	.sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
	{
	  i = si[0];
	  u16 etype = etypes[i];

	  if (etype == et_vlan)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1q_lookup, dmacs_bad[i], 0,
				    main_is_l3, dmac_check);

	    }
	  else if (etype == et_dot1ad)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1ad_lookup, dmacs_bad[i], 1,
				    main_is_l3, dmac_check);
	    }
	  else
	    {
	      /* untagged packet with not well known ethertype */
	      if (last_unknown_etype != etype)
		{
		  last_unknown_etype = etype;
		  etype = clib_host_to_net_u16 (etype);
		  last_unknown_next = eth_input_next_by_type (etype);
		}
	      if (dmac_check && main_is_l3 && dmacs_bad[i])
		{
		  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
		  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
		  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
		}
	      else
		nexts[i] = last_unknown_next;
	    }

	  /* next */
	  n_left--;
	  si++;
	}

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}

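/* Entry point for a frame arriving on a single interface: decide whether a
   DMAC check is needed based on how the interface is used (pure L2, pure L3,
   or mixed) and whether the NIC has already filtered the DMAC (STATUS_L3). */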
static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
		      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;

  if (main_is_l3)
    {
      if (int_is_l3 ||		/* DMAC filter already done by NIC */
	  ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 1);
	}
      return;
    }
  else
    {
      if (hi->l3_if_count == 0)
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 1);
	}
      return;
    }
}

1122ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
1123 vlib_frame_t * from_frame)
1124{
1125 u32 *from, n_left;
Benoît Ganne98477922019-04-10 14:21:11 +02001126 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
Damjan Marion650223c2018-11-14 16:55:53 +01001127 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001128 from = vlib_frame_vector_args (from_frame);
1129 n_left = from_frame->n_vectors;
Damjan Marion650223c2018-11-14 16:55:53 +01001130
Dave Barach5ecd5a52019-02-25 15:27:28 -05001131 while (n_left)
Damjan Marion650223c2018-11-14 16:55:53 +01001132 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001133 ethernet_input_trace_t *t0;
1134 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
1135
1136 if (b0->flags & VLIB_BUFFER_IS_TRACED)
1137 {
1138 t0 = vlib_add_trace (vm, node, b0,
1139 sizeof (ethernet_input_trace_t));
1140 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
1141 sizeof (t0->packet_data));
1142 t0->frame_flags = from_frame->flags;
1143 clib_memcpy_fast (&t0->frame_data,
1144 vlib_frame_scalar_args (from_frame),
1145 sizeof (ethernet_input_frame_t));
1146 }
1147 from += 1;
1148 n_left -= 1;
Damjan Marion650223c2018-11-14 16:55:53 +01001149 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001150 }
1151
1152 /* rx pcap capture if enabled */
Dave Barach33909772019-09-23 10:27:27 -04001153 if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
Dave Barach5ecd5a52019-02-25 15:27:28 -05001154 {
1155 u32 bi0;
Dave Barach33909772019-09-23 10:27:27 -04001156 vnet_pcap_t *pp = &vlib_global_main.pcap;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001157
1158 from = vlib_frame_vector_args (from_frame);
1159 n_left = from_frame->n_vectors;
1160 while (n_left > 0)
1161 {
Dave Barach9137e542019-09-13 17:47:50 -04001162 int classify_filter_result;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001163 vlib_buffer_t *b0;
1164 bi0 = from[0];
1165 from++;
Dave Barach9137e542019-09-13 17:47:50 -04001166 n_left--;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001167 b0 = vlib_get_buffer (vm, bi0);
Dave Barachf5667c32019-09-25 11:27:46 -04001168 if (pp->filter_classify_table_index != ~0)
Dave Barach9137e542019-09-13 17:47:50 -04001169 {
1170 classify_filter_result =
1171 vnet_is_packet_traced_inline
Dave Barachf5667c32019-09-25 11:27:46 -04001172 (b0, pp->filter_classify_table_index, 0 /* full classify */ );
Dave Barach9137e542019-09-13 17:47:50 -04001173 if (classify_filter_result)
Dave Barach33909772019-09-23 10:27:27 -04001174 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1175 pp->max_bytes_per_pkt);
Dave Barach9137e542019-09-13 17:47:50 -04001176 continue;
1177 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001178
Dave Barach33909772019-09-23 10:27:27 -04001179 if (pp->pcap_sw_if_index == 0 ||
1180 pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
Dave Barach5ecd5a52019-02-25 15:27:28 -05001181 {
Dave Barachd28437c2019-11-20 09:28:31 -05001182 vnet_main_t *vnm = vnet_get_main ();
1183 vnet_hw_interface_t *hi =
1184 vnet_get_sup_hw_interface
1185 (vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);
1186
1187 /* Capture pkt if not filtered, or if filter hits */
1188 if (hi->trace_classify_table_index == ~0 ||
1189 vnet_is_packet_traced_inline
1190 (b0, hi->trace_classify_table_index,
1191 0 /* full classify */ ))
1192 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1193 pp->max_bytes_per_pkt);
Dave Barach5ecd5a52019-02-25 15:27:28 -05001194 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001195 }
Damjan Marion650223c2018-11-14 16:55:53 +01001196 }
1197}
1198
static_always_inline void
ethernet_input_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * node,
		       u32 * from, u32 n_packets,
		       ethernet_input_variant_t variant)
{
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  vlib_node_runtime_t *error_node;
  u32 n_left_from, next_index, *to_next;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 thread_index = vm->thread_index;
  u32 cached_sw_if_index = ~0;
  u32 cached_is_l2 = 0;		/* shut up gcc */
  vnet_hw_interface_t *hi = NULL;	/* used for main interface only */
  ethernet_interface_t *ei = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
    error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
  else
    error_node = node;

  n_left_from = n_packets;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u8 next0, next1, error0, error1;
	  u16 type0, orig_type0, type1, orig_type1;
	  u16 outer_id0, inner_id0, outer_id1, inner_id1;
	  u32 match_flags0, match_flags1;
	  u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
	    new_sw_if_index1, len1;
	  vnet_hw_interface_t *hi0, *hi1;
	  main_intf_t *main_intf0, *main_intf1;
	  vlan_intf_t *vlan_intf0, *vlan_intf1;
	  qinq_intf_t *qinq_intf0, *qinq_intf1;
	  u32 is_l20, is_l21;
	  ethernet_header_t *e0, *e1;
	  u64 dmacs[2];
	  u8 dmacs_bad[2];

	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], STORE);
	    vlib_prefetch_buffer_header (b[3], STORE);

	    CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
	    CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = b[0];
	  b1 = b[1];
	  b += 2;

	  error0 = error1 = ETHERNET_ERROR_NONE;
	  e0 = vlib_buffer_get_current (b0);
	  type0 = clib_net_to_host_u16 (e0->type);
	  e1 = vlib_buffer_get_current (b1);
	  type1 = clib_net_to_host_u16 (e1->type);

	  /* Set the L2 header offset for all packets */
	  vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
	  vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
	  b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
	  b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

	  /* Speed-path for the untagged case */
	  if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			    && !ethernet_frame_is_any_tagged_x2 (type0,
								 type1)))
	    {
	      main_intf_t *intf0;
	      subint_config_t *subint0;
	      u32 sw_if_index0, sw_if_index1;

	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
	      is_l20 = cached_is_l2;

	      /* This is probably wholly unnecessary */
	      if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
		goto slowpath;

	      /* Now sw_if_index0 == sw_if_index1 */
	      if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
		{
		  cached_sw_if_index = sw_if_index0;
		  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
		  ei = ethernet_get_interface (em, hi->hw_if_index);
		  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
		  subint0 = &intf0->untagged_subint;
		  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
		}

	      if (PREDICT_TRUE (is_l20 != 0))
		{
		  vnet_buffer (b0)->l3_hdr_offset =
		    vnet_buffer (b0)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  vnet_buffer (b1)->l3_hdr_offset =
		    vnet_buffer (b1)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  next0 = em->l2_next;
		  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
		  next1 = em->l2_next;
		  vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
		}
	      else
		{
		  if (hi->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
		    goto skip_dmac_check01;

		  dmacs[0] = *(u64 *) e0;
		  dmacs[1] = *(u64 *) e1;

		  if (ei && vec_len (ei->secondary_addrs))
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      1 /* have_sec_dmac */ );
		  else
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      0 /* have_sec_dmac */ );

		  if (dmacs_bad[0])
		    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
		  if (dmacs_bad[1])
		    error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;

		skip_dmac_check01:
		  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type0, b0,
				       &error0, &next0);
		  vlib_buffer_advance (b1, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type1, b1,
				       &error1, &next1);
		}
	      goto ship_it01;
	    }

	  /* Slow-path for the tagged case */
	slowpath:
	  parse_header (variant,
			b0,
			&type0,
			&orig_type0, &outer_id0, &inner_id0, &match_flags0);

	  parse_header (variant,
			b1,
			&type1,
			&orig_type1, &outer_id1, &inner_id1, &match_flags1);

	  old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	  old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index0,
				  orig_type0,
				  outer_id0,
				  inner_id0,
				  &hi0,
				  &main_intf0, &vlan_intf0, &qinq_intf0);

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index1,
				  orig_type1,
				  outer_id1,
				  inner_id1,
				  &hi1,
				  &main_intf1, &vlan_intf1, &qinq_intf1);

	  identify_subint (hi0,
			   b0,
			   match_flags0,
			   main_intf0,
			   vlan_intf0,
			   qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

	  identify_subint (hi1,
			   b1,
			   match_flags1,
			   main_intf1,
			   vlan_intf1,
			   qinq_intf1, &new_sw_if_index1, &error1, &is_l21);

	  // Save RX sw_if_index for later nodes
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    error0 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] =
	    error1 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;

	  // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
	  if (((new_sw_if_index0 != ~0)
	       && (new_sw_if_index0 != old_sw_if_index0))
	      || ((new_sw_if_index1 != ~0)
		  && (new_sw_if_index1 != old_sw_if_index1)))
	    {

	      len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
		- vnet_buffer (b0)->l2_hdr_offset;
	      len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
		- vnet_buffer (b1)->l2_hdr_offset;

	      stats_n_packets += 2;
	      stats_n_bytes += len0 + len1;

	      if (PREDICT_FALSE
		  (!(new_sw_if_index0 == stats_sw_if_index
		     && new_sw_if_index1 == stats_sw_if_index)))
		{
		  stats_n_packets -= 2;
		  stats_n_bytes -= len0 + len1;

		  if (new_sw_if_index0 != old_sw_if_index0
		      && new_sw_if_index0 != ~0)
		    vlib_increment_combined_counter (vnm->
						     interface_main.combined_sw_if_counters
						     +
						     VNET_INTERFACE_COUNTER_RX,
						     thread_index,
						     new_sw_if_index0, 1,
						     len0);
		  if (new_sw_if_index1 != old_sw_if_index1
		      && new_sw_if_index1 != ~0)
		    vlib_increment_combined_counter (vnm->
						     interface_main.combined_sw_if_counters
						     +
						     VNET_INTERFACE_COUNTER_RX,
						     thread_index,
						     new_sw_if_index1, 1,
						     len1);

		  if (new_sw_if_index0 == new_sw_if_index1)
		    {
		      if (stats_n_packets > 0)
			{
			  vlib_increment_combined_counter
			    (vnm->interface_main.combined_sw_if_counters
			     + VNET_INTERFACE_COUNTER_RX,
			     thread_index,
			     stats_sw_if_index,
			     stats_n_packets, stats_n_bytes);
			  stats_n_packets = stats_n_bytes = 0;
			}
		      stats_sw_if_index = new_sw_if_index0;
		    }
		}
	    }

	  if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	    is_l20 = is_l21 = 0;

Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001485 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1486 &next0);
1487 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1488 &next1);
1489
John Lo1904c472017-03-10 17:15:22 -05001490 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001491 b0->error = error_node->errors[error0];
1492 b1->error = error_node->errors[error1];
1493
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001494 // verify speculative enqueue
1495 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1496 n_left_to_next, bi0, bi1, next0,
1497 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001498 }
1499
1500 while (n_left_from > 0 && n_left_to_next > 0)
1501 {
1502 u32 bi0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001503 vlib_buffer_t *b0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001504 u8 error0, next0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001505 u16 type0, orig_type0;
1506 u16 outer_id0, inner_id0;
1507 u32 match_flags0;
1508 u32 old_sw_if_index0, new_sw_if_index0, len0;
1509 vnet_hw_interface_t *hi0;
1510 main_intf_t *main_intf0;
1511 vlan_intf_t *vlan_intf0;
1512 qinq_intf_t *qinq_intf0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001513 ethernet_header_t *e0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001514 u32 is_l20;
Matthew Smith42bde452019-11-18 09:35:24 -06001515 u64 dmacs[2];
1516 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001517
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001518 // Prefetch next iteration
1519 if (n_left_from > 1)
1520 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001521 vlib_prefetch_buffer_header (b[1], STORE);
1522 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001523 }
1524
1525 bi0 = from[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001526 to_next[0] = bi0;
1527 from += 1;
1528 to_next += 1;
1529 n_left_from -= 1;
1530 n_left_to_next -= 1;
1531
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001532 b0 = b[0];
1533 b += 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001534
1535 error0 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001536 e0 = vlib_buffer_get_current (b0);
1537 type0 = clib_net_to_host_u16 (e0->type);
1538
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001539 /* Set the L2 header offset for all packets */
1540 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1541 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1542
John Locc532852016-12-14 15:42:45 -05001543 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001544 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1545 && !ethernet_frame_is_tagged (type0)))
1546 {
1547 main_intf_t *intf0;
1548 subint_config_t *subint0;
1549 u32 sw_if_index0;
1550
1551 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1552 is_l20 = cached_is_l2;
1553
1554 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1555 {
1556 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001557 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001558 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001559 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001560 subint0 = &intf0->untagged_subint;
1561 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1562 }
John Lo7714b302016-12-20 16:59:02 -05001563
John Lo7714b302016-12-20 16:59:02 -05001564
Dave Barachcfba1e22016-11-16 10:23:50 -05001565 if (PREDICT_TRUE (is_l20 != 0))
1566 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001567 vnet_buffer (b0)->l3_hdr_offset =
1568 vnet_buffer (b0)->l2_hdr_offset +
1569 sizeof (ethernet_header_t);
1570 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001571 next0 = em->l2_next;
1572 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001573 }
John Locc532852016-12-14 15:42:45 -05001574 else
1575 {
John Lo4a302ee2020-05-12 22:34:39 -04001576 if (hi->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
1577 goto skip_dmac_check0;
1578
Matthew Smith42bde452019-11-18 09:35:24 -06001579 dmacs[0] = *(u64 *) e0;
1580
1581 if (ei && vec_len (ei->secondary_addrs))
1582 ethernet_input_inline_dmac_check (hi, dmacs,
1583 dmacs_bad,
1584 1 /* n_packets */ ,
1585 ei,
1586 1 /* have_sec_dmac */ );
1587 else
1588 ethernet_input_inline_dmac_check (hi, dmacs,
1589 dmacs_bad,
1590 1 /* n_packets */ ,
1591 ei,
1592 0 /* have_sec_dmac */ );
1593
1594 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001595 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001596
John Lo4a302ee2020-05-12 22:34:39 -04001597 skip_dmac_check0:
Andrew Yourtchenkoe78bca12018-10-10 16:15:55 +02001598 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001599 determine_next_node (em, variant, 0, type0, b0,
1600 &error0, &next0);
John Locc532852016-12-14 15:42:45 -05001601 }
1602 goto ship_it0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001603 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001604
John Locc532852016-12-14 15:42:45 -05001605 /* Slow-path for the tagged case */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001606 parse_header (variant,
1607 b0,
1608 &type0,
1609 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001610
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001611 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001612
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001613 eth_vlan_table_lookups (em,
1614 vnm,
1615 old_sw_if_index0,
1616 orig_type0,
1617 outer_id0,
1618 inner_id0,
1619 &hi0,
1620 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001621
1622 identify_subint (hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001623 b0,
1624 match_flags0,
1625 main_intf0,
1626 vlan_intf0,
1627 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001628
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001629 // Save RX sw_if_index for later nodes
1630 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1631 error0 !=
1632 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001633
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001634 // Increment subinterface stats
1635 // Note that interface-level counters have already been incremented
1636 // prior to calling this function. Thus only subinterface counters
1637 // are incremented here.
1638 //
Damjan Marion607de1a2016-08-16 22:53:54 +02001639 // Interface level counters include packets received on the main
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001640 // interface and all subinterfaces. Subinterface level counters
1641 // include only those packets received on that subinterface
Ed Warnickecb9cada2015-12-08 15:45:58 -07001642 // Increment stats if the subint is valid and it is not the main intf
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001643 if ((new_sw_if_index0 != ~0)
1644 && (new_sw_if_index0 != old_sw_if_index0))
1645 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001646
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001647 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001648 - vnet_buffer (b0)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001649
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001650 stats_n_packets += 1;
1651 stats_n_bytes += len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001652
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001653 // Batch stat increments from the same subinterface so counters
Damjan Marion607de1a2016-08-16 22:53:54 +02001654 // don't need to be incremented for every packet.
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001655 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1656 {
1657 stats_n_packets -= 1;
1658 stats_n_bytes -= len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001659
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001660 if (new_sw_if_index0 != ~0)
1661 vlib_increment_combined_counter
1662 (vnm->interface_main.combined_sw_if_counters
1663 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001664 thread_index, new_sw_if_index0, 1, len0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001665 if (stats_n_packets > 0)
1666 {
1667 vlib_increment_combined_counter
1668 (vnm->interface_main.combined_sw_if_counters
1669 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001670 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001671 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1672 stats_n_packets = stats_n_bytes = 0;
1673 }
1674 stats_sw_if_index = new_sw_if_index0;
1675 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001676 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001677
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001678 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1679 is_l20 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001680
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001681 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1682 &next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001683
John Lo1904c472017-03-10 17:15:22 -05001684 ship_it0:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001685 b0->error = error_node->errors[error0];
1686
1687 // verify speculative enqueue
Ed Warnickecb9cada2015-12-08 15:45:58 -07001688 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1689 to_next, n_left_to_next,
1690 bi0, next0);
1691 }
1692
1693 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1694 }
1695
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001696 // Increment any remaining batched stats
1697 if (stats_n_packets > 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001698 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001699 vlib_increment_combined_counter
1700 (vnm->interface_main.combined_sw_if_counters
1701 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001702 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001703 node->runtime_data[0] = stats_sw_if_index;
1704 }
Damjan Marion650223c2018-11-14 16:55:53 +01001705}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001706
Damjan Marion5beecec2018-09-10 13:09:21 +02001707VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1708 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001709 vlib_frame_t * frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001710{
Damjan Marion650223c2018-11-14 16:55:53 +01001711 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001712 u32 *from = vlib_frame_vector_args (frame);
1713 u32 n_packets = frame->n_vectors;
1714
1715 ethernet_input_trace (vm, node, frame);
1716
1717 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1718 {
Damjan Marion650223c2018-11-14 16:55:53 +01001719 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
Damjan Marion650223c2018-11-14 16:55:53 +01001720 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001721 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1722 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
Damjan Marion650223c2018-11-14 16:55:53 +01001723 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001724 else
1725 ethernet_input_inline (vm, node, from, n_packets,
1726 ETHERNET_INPUT_VARIANT_ETHERNET);
Damjan Marion650223c2018-11-14 16:55:53 +01001727 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001728}
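/*
 * Editor's note: the sketch below is illustrative only and not part of this
 * file.  It shows, under stated assumptions, how a device-input node might
 * hand a frame to ethernet-input with the single-interface fast path
 * enabled.  The frame flags and vlib frame helpers are taken from their use
 * above; the sw_if_index member of ethernet_input_frame_t and the function
 * name are assumptions for illustration.
 */
#if 0				/* illustrative sketch, not compiled */
static_always_inline void
example_enqueue_to_ethernet_input (vlib_main_t * vm, u32 * buffers,
				   u16 n_buffers, u32 sw_if_index,
				   u32 hw_if_index, int ip4_cksum_ok)
{
  vlib_frame_t *f = vlib_get_frame_to_node (vm, ethernet_input_node.index);
  ethernet_input_frame_t *ef = vlib_frame_scalar_args (f);
  u32 *to = vlib_frame_vector_args (f);

  /* copy the buffer indices into the frame vector */
  clib_memcpy_fast (to, buffers, n_buffers * sizeof (u32));
  f->n_vectors = n_buffers;

  /* every packet in this frame was received on the same interface */
  ef->sw_if_index = sw_if_index;	/* assumed field name */
  ef->hw_if_index = hw_if_index;
  f->flags |= ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
  if (ip4_cksum_ok)
    f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;

  vlib_put_frame_to_node (vm, ethernet_input_node.index, f);
}
#endif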
Ed Warnickecb9cada2015-12-08 15:45:58 -07001729
Damjan Marion5beecec2018-09-10 13:09:21 +02001730VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1731 vlib_node_runtime_t * node,
1732 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001733{
Damjan Marion650223c2018-11-14 16:55:53 +01001734 u32 *from = vlib_frame_vector_args (from_frame);
1735 u32 n_packets = from_frame->n_vectors;
1736 ethernet_input_trace (vm, node, from_frame);
1737 ethernet_input_inline (vm, node, from, n_packets,
1738 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1739 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001740}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001741
Damjan Marion5beecec2018-09-10 13:09:21 +02001742VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1743 vlib_node_runtime_t * node,
1744 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001745{
Damjan Marion650223c2018-11-14 16:55:53 +01001746 u32 *from = vlib_frame_vector_args (from_frame);
1747 u32 n_packets = from_frame->n_vectors;
1748 ethernet_input_trace (vm, node, from_frame);
1749 ethernet_input_inline (vm, node, from, n_packets,
1750 ETHERNET_INPUT_VARIANT_NOT_L2);
1751 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001752}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001753
1754
1755// Return the subinterface config struct for the given sw_if_index
1756// Also return via parameter the appropriate match flags for the
1757// configured number of tags.
1758// On error (unsupported or not ethernet) return 0.
1759static subint_config_t *
1760ethernet_sw_interface_get_config (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001761 u32 sw_if_index,
1762 u32 * flags, u32 * unsupported)
1763{
1764 ethernet_main_t *em = &ethernet_main;
1765 vnet_hw_interface_t *hi;
1766 vnet_sw_interface_t *si;
1767 main_intf_t *main_intf;
1768 vlan_table_t *vlan_table;
1769 qinq_table_t *qinq_table;
1770 subint_config_t *subint = 0;
1771
Ed Warnickecb9cada2015-12-08 15:45:58 -07001772 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1773
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001774 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1775 {
1776 *unsupported = 0;
1777 goto done; // non-ethernet interface
1778 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001779
1780 // ensure there's an entry for the main intf (shouldn't really be necessary)
1781 vec_validate (em->main_intfs, hi->hw_if_index);
1782 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1783
1784 // Locate the subint for the given ethernet config
1785 si = vnet_get_sw_interface (vnm, sw_if_index);
1786
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001787 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1788 {
1789 p2p_ethernet_main_t *p2pm = &p2p_main;
1790 u32 p2pe_sw_if_index =
1791 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1792 if (p2pe_sw_if_index == ~0)
1793 {
1794 pool_get (p2pm->p2p_subif_pool, subint);
1795 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1796 }
1797 else
1798 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1799 *flags = SUBINT_CONFIG_P2P;
1800 }
Neale Ranns17ff3c12018-07-04 10:24:24 -07001801 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1802 {
1803 pipe_t *pipe;
1804
1805 pipe = pipe_get (sw_if_index);
1806 subint = &pipe->subint;
1807 *flags = SUBINT_CONFIG_P2P;
1808 }
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001809 else if (si->sub.eth.flags.default_sub)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001810 {
1811 subint = &main_intf->default_subint;
Mike Bly88076742018-09-24 10:13:06 -07001812 *flags = SUBINT_CONFIG_MATCH_1_TAG |
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001813 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1814 }
1815 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1816 {
1817 // if no flags are set then this is a main interface
1818 // so treat as untagged
1819 subint = &main_intf->untagged_subint;
1820 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1821 }
1822 else
1823 {
1824 // one or two tags
1825 // first get the vlan table
1826 if (si->sub.eth.flags.dot1ad)
1827 {
1828 if (main_intf->dot1ad_vlans == 0)
1829 {
1830 // Allocate a vlan table from the pool
1831 pool_get (em->vlan_pool, vlan_table);
1832 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1833 }
1834 else
1835 {
1836 // Get ptr to existing vlan table
1837 vlan_table =
1838 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1839 }
1840 }
1841 else
1842 { // dot1q
1843 if (main_intf->dot1q_vlans == 0)
1844 {
1845 // Allocate a vlan table from the pool
1846 pool_get (em->vlan_pool, vlan_table);
1847 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1848 }
1849 else
1850 {
1851 // Get ptr to existing vlan table
1852 vlan_table =
1853 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
1854 }
1855 }
1856
1857 if (si->sub.eth.flags.one_tag)
1858 {
1859 *flags = si->sub.eth.flags.exact_match ?
1860 SUBINT_CONFIG_MATCH_1_TAG :
1861 (SUBINT_CONFIG_MATCH_1_TAG |
1862 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1863
1864 if (si->sub.eth.flags.outer_vlan_id_any)
1865 {
1866 // not implemented yet
1867 *unsupported = 1;
1868 goto done;
1869 }
1870 else
1871 {
1872 // a single vlan, a common case
1873 subint =
1874 &vlan_table->vlans[si->sub.eth.
1875 outer_vlan_id].single_tag_subint;
1876 }
1877
1878 }
1879 else
1880 {
1881 // Two tags
1882 *flags = si->sub.eth.flags.exact_match ?
1883 SUBINT_CONFIG_MATCH_2_TAG :
1884 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1885
1886 if (si->sub.eth.flags.outer_vlan_id_any
1887 && si->sub.eth.flags.inner_vlan_id_any)
1888 {
1889 // not implemented yet
1890 *unsupported = 1;
1891 goto done;
1892 }
1893
1894 if (si->sub.eth.flags.inner_vlan_id_any)
1895 {
1896 // a specific outer and "any" inner
1897 // don't need a qinq table for this
1898 subint =
1899 &vlan_table->vlans[si->sub.eth.
1900 outer_vlan_id].inner_any_subint;
1901 if (si->sub.eth.flags.exact_match)
1902 {
1903 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1904 }
1905 else
1906 {
1907 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1908 SUBINT_CONFIG_MATCH_3_TAG;
1909 }
1910 }
1911 else
1912 {
1913	      // a specific outer + specific inner vlan id, a common case
1914
1915 // get the qinq table
1916 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1917 {
1918 // Allocate a qinq table from the pool
1919 pool_get (em->qinq_pool, qinq_table);
1920 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1921 qinq_table - em->qinq_pool;
1922 }
1923 else
1924 {
1925 // Get ptr to existing qinq table
1926 qinq_table =
1927 vec_elt_at_index (em->qinq_pool,
1928 vlan_table->vlans[si->sub.
1929 eth.outer_vlan_id].
1930 qinqs);
1931 }
1932 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1933 }
1934 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001935 }
1936
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001937done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001938 return subint;
1939}
1940
Damjan Marion5beecec2018-09-10 13:09:21 +02001941static clib_error_t *
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001942ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001943{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001944 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001945 u32 placeholder_flags;
1946 u32 placeholder_unsup;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001947 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001948
1949 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001950 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001951 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1952 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001953
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001954 if (subint == 0)
1955 {
1956 // not implemented yet or not ethernet
1957 goto done;
1958 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001959
1960 subint->sw_if_index =
1961 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1962
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001963done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001964 return error;
1965}
1966
1967VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1968
1969
Damjan Marion5beecec2018-09-10 13:09:21 +02001970#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -07001971// Set the L2/L3 mode for the subinterface
1972void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001973ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001974{
1975 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04001976 u32 placeholder_flags;
1977 u32 placeholder_unsup;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001978 int is_port;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001979 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001980
1981 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1982
1983 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001984 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04001985 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
1986 &placeholder_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001987
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001988 if (subint == 0)
1989 {
1990 // unimplemented or not ethernet
1991 goto done;
1992 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001993
1994 // Double check that the config we found is for our interface (or the interface is down)
1995 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1996
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001997 if (l2)
1998 {
1999 subint->flags |= SUBINT_CONFIG_L2;
2000 if (is_port)
2001 subint->flags |=
2002 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
2003 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
2004 }
2005 else
2006 {
2007 subint->flags &= ~SUBINT_CONFIG_L2;
2008 if (is_port)
2009 subint->flags &=
2010 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
2011 | SUBINT_CONFIG_MATCH_3_TAG);
2012 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002013
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002014done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002015 return;
2016}
2017
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002018/*
2019 * Set the L2/L3 mode for the subinterface regardless of port
2020 */
2021void
2022ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002023 u32 sw_if_index, u32 l2)
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002024{
2025 subint_config_t *subint;
Dave Barach11fb09e2020-08-06 12:10:09 -04002026 u32 placeholder_flags;
2027 u32 placeholder_unsup;
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002028
2029 /* Find the config for this subinterface */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002030 subint =
Dave Barach11fb09e2020-08-06 12:10:09 -04002031 ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
2032 &placeholder_unsup);
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002033
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002034 if (subint == 0)
2035 {
2036 /* unimplemented or not ethernet */
2037 goto done;
2038 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002039
2040 /*
2041 * Double check that the config we found is for our interface (or the
2042 * interface is down)
2043 */
2044 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2045
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002046 if (l2)
2047 {
2048 subint->flags |= SUBINT_CONFIG_L2;
2049 }
2050 else
2051 {
2052 subint->flags &= ~SUBINT_CONFIG_L2;
2053 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002054
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002055done:
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002056 return;
2057}
Damjan Marion5beecec2018-09-10 13:09:21 +02002058#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07002059
2060static clib_error_t *
2061ethernet_sw_interface_add_del (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002062 u32 sw_if_index, u32 is_create)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002063{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002064 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002065 subint_config_t *subint;
2066 u32 match_flags;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002067 u32 unsupported = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002068
2069 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002070 subint =
2071 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
2072 &unsupported);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002073
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002074 if (subint == 0)
2075 {
2076 // not implemented yet or not ethernet
2077 if (unsupported)
2078 {
Damjan Marion607de1a2016-08-16 22:53:54 +02002079 // this is the NYI case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002080 error = clib_error_return (0, "not implemented yet");
2081 }
2082 goto done;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002083 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002084
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002085 if (!is_create)
2086 {
2087 subint->flags = 0;
2088 return error;
2089 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002090
2091 // Initialize the subint
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002092 if (subint->flags & SUBINT_CONFIG_VALID)
2093 {
2094 // Error vlan already in use
2095 error = clib_error_return (0, "vlan is already in use");
2096 }
2097 else
2098 {
Neale Ranns17ff3c12018-07-04 10:24:24 -07002099 // Note that config is L3 by default
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002100 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2101 subint->sw_if_index = ~0; // because interfaces are initially down
2102 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002103
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002104done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002105 return error;
2106}
2107
2108VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2109
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002110static char *ethernet_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002111#define ethernet_error(n,c,s) s,
2112#include "error.def"
2113#undef ethernet_error
2114};
2115
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002116/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002117VLIB_REGISTER_NODE (ethernet_input_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002118 .name = "ethernet-input",
2119 /* Takes a vector of packets. */
2120 .vector_size = sizeof (u32),
Damjan Marion650223c2018-11-14 16:55:53 +01002121 .scalar_size = sizeof (ethernet_input_frame_t),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002122 .n_errors = ETHERNET_N_ERROR,
2123 .error_strings = ethernet_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002124 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2125 .next_nodes = {
2126#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2127 foreach_ethernet_input_next
2128#undef _
2129 },
Ed Warnickecb9cada2015-12-08 15:45:58 -07002130 .format_buffer = format_ethernet_header_with_length,
2131 .format_trace = format_ethernet_input_trace,
2132 .unformat_buffer = unformat_ethernet_header,
2133};
2134
Damjan Marion5beecec2018-09-10 13:09:21 +02002135VLIB_REGISTER_NODE (ethernet_input_type_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002136 .name = "ethernet-input-type",
2137 /* Takes a vector of packets. */
2138 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002139 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2140 .next_nodes = {
2141#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2142 foreach_ethernet_input_next
2143#undef _
2144 },
2145};
2146
Damjan Marion5beecec2018-09-10 13:09:21 +02002147VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002148 .name = "ethernet-input-not-l2",
2149 /* Takes a vector of packets. */
2150 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002151 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2152 .next_nodes = {
2153#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2154 foreach_ethernet_input_next
2155#undef _
2156 },
2157};
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002158/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002159
Damjan Marion5beecec2018-09-10 13:09:21 +02002160#ifndef CLIB_MARCH_VARIANT
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002161void
2162ethernet_set_rx_redirect (vnet_main_t * vnm,
2163 vnet_hw_interface_t * hi, u32 enable)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002164{
2165 // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
2166 // don't go directly to ip4-input)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002167 vnet_hw_interface_rx_redirect_to_node
2168 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002169}
2170
2171
2172/*
2173 * Initialization and registration for the next_by_ethernet structure
2174 */
2175
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002176clib_error_t *
2177next_by_ethertype_init (next_by_ethertype_t * l3_next)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002178{
2179 l3_next->input_next_by_type = sparse_vec_new
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002180 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002181 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2182
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002183 vec_validate (l3_next->sparse_index_by_input_next_index,
2184 ETHERNET_INPUT_NEXT_DROP);
2185 vec_validate (l3_next->sparse_index_by_input_next_index,
2186 ETHERNET_INPUT_NEXT_PUNT);
2187 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2188 SPARSE_VEC_INVALID_INDEX;
2189 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2190 SPARSE_VEC_INVALID_INDEX;
2191
Damjan Marion607de1a2016-08-16 22:53:54 +02002192 /*
2193 * Make sure we don't wipe out an ethernet registration by mistake
Dave Barach1f49ed62016-02-24 11:29:06 -05002194 * Can happen if init function ordering constraints are missing.
2195 */
2196 if (CLIB_DEBUG > 0)
2197 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002198 ethernet_main_t *em = &ethernet_main;
2199 ASSERT (em->next_by_ethertype_register_called == 0);
Dave Barach1f49ed62016-02-24 11:29:06 -05002200 }
2201
Ed Warnickecb9cada2015-12-08 15:45:58 -07002202 return 0;
2203}
2204
2205// Add an ethertype -> next index mapping to the structure
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002206clib_error_t *
2207next_by_ethertype_register (next_by_ethertype_t * l3_next,
2208 u32 ethertype, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002209{
2210 u32 i;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002211 u16 *n;
2212 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002213
Dave Barach1f49ed62016-02-24 11:29:06 -05002214 if (CLIB_DEBUG > 0)
2215 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002216 ethernet_main_t *em = &ethernet_main;
Dave Barach1f49ed62016-02-24 11:29:06 -05002217 em->next_by_ethertype_register_called = 1;
2218 }
2219
Ed Warnickecb9cada2015-12-08 15:45:58 -07002220 /* Setup ethernet type -> next index sparse vector mapping. */
2221 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2222 n[0] = next_index;
2223
2224 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2225 is updated. */
2226 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2227 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002228 l3_next->
2229 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002230
2231 // do not allow the cached next indices to be updated if L3
2232 // redirect is enabled, as it will have overwritten them
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002233 if (!em->redirect_l3)
2234 {
2235 // Cache common ethertypes directly
2236 if (ethertype == ETHERNET_TYPE_IP4)
2237 {
2238 l3_next->input_next_ip4 = next_index;
2239 }
2240 else if (ethertype == ETHERNET_TYPE_IP6)
2241 {
2242 l3_next->input_next_ip6 = next_index;
2243 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002244 else if (ethertype == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002245 {
2246 l3_next->input_next_mpls = next_index;
2247 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002248 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002249 return 0;
2250}
2251
Dave Barachf8d50682019-05-14 18:01:44 -04002252void
2253ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002254{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002255 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2256 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002257
2258 ethernet_setup_node (vm, ethernet_input_node.index);
2259 ethernet_setup_node (vm, ethernet_input_type_node.index);
2260 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2261
2262 next_by_ethertype_init (&em->l3_next);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002263
Ed Warnickecb9cada2015-12-08 15:45:58 -07002264 // Initialize pools and vector for vlan parsing
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002265 vec_validate (em->main_intfs, 10); // 10 main interfaces
2266 pool_alloc (em->vlan_pool, 10);
2267 pool_alloc (em->qinq_pool, 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002268
2269 // The first vlan pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002270 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002271 // The first qinq pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002272 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002273}
2274
Ed Warnickecb9cada2015-12-08 15:45:58 -07002275void
2276ethernet_register_input_type (vlib_main_t * vm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002277 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002278{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002279 ethernet_main_t *em = &ethernet_main;
2280 ethernet_type_info_t *ti;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002281 u32 i;
2282
2283 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002284 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002285 if (error)
2286 clib_error_report (error);
2287 }
2288
2289 ti = ethernet_get_type_info (em, type);
Dave Barach4bda2d92019-07-03 15:21:50 -04002290 if (ti == 0)
2291 {
2292 clib_warning ("type_info NULL for type %d", type);
2293 return;
2294 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002295 ti->node_index = node_index;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002296 ti->next_index = vlib_node_add_next (vm,
2297 ethernet_input_node.index, node_index);
2298 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002299 ASSERT (i == ti->next_index);
2300
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002301 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002302 ASSERT (i == ti->next_index);
2303
2304 // Add the L3 node for this ethertype to the next nodes structure
2305 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2306
2307 // Call the registration functions for other nodes that want a mapping
2308 l2bvi_register_input_type (vm, type, node_index);
2309}
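/*
 * Editor's note: an illustrative sketch, not part of this file, of the usual
 * registration pattern from a protocol's init function.  ETHERNET_TYPE_ARP
 * is only an example ethertype and my_proto_input_node is a hypothetical
 * node registration.
 */
#if 0				/* illustrative sketch, not compiled */
static clib_error_t *
my_proto_init (vlib_main_t * vm)
{
  ethernet_register_input_type (vm, ETHERNET_TYPE_ARP,
				my_proto_input_node.index);
  return 0;
}

VLIB_INIT_FUNCTION (my_proto_init);
#endif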
2310
2311void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002312ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002313{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002314 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002315 u32 i;
2316
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002317 em->l2_next =
2318 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002319
Damjan Marion607de1a2016-08-16 22:53:54 +02002320 /*
Ed Warnickecb9cada2015-12-08 15:45:58 -07002321 * Even if we never use these arcs, we have to align the next indices...
2322 */
2323 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2324
2325 ASSERT (i == em->l2_next);
2326
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002327 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002328 ASSERT (i == em->l2_next);
2329}
2330
2331// Register a next node for L3 redirect, and enable L3 redirect
2332void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002333ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002334{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002335 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002336 u32 i;
2337
2338 em->redirect_l3 = 1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002339 em->redirect_l3_next = vlib_node_add_next (vm,
2340 ethernet_input_node.index,
2341 node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002342 /*
2343 * Change the cached next nodes to the redirect node
2344 */
2345 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2346 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2347 em->l3_next.input_next_mpls = em->redirect_l3_next;
2348
2349 /*
2350 * Even if we never use these arcs, we have to align the next indices...
2351 */
2352 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2353
2354 ASSERT (i == em->redirect_l3_next);
jerryianff82ed62016-12-05 17:13:00 +08002355
2356 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2357
2358 ASSERT (i == em->redirect_l3_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002359}
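/*
 * Editor's note: once redirect_l3 is set here, next_by_ethertype_register()
 * above intentionally stops refreshing the cached ip4/ip6/mpls next indices,
 * so the redirect node remains the cached next for those ethertypes even if
 * the protocols register again later.
 */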
Damjan Marion5beecec2018-09-10 13:09:21 +02002360#endif
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002361
2362/*
2363 * fd.io coding-style-patch-verification: ON
2364 *
2365 * Local Variables:
2366 * eval: (c-set-style "gnu")
2367 * End:
2368 */