blob: 3c4330e6225a0ff1ff1e8ab395b81eb6ab96dfec [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
Damjan Marion650223c2018-11-14 16:55:53 +01002 * Copyright (c) 2018 Cisco and/or its affiliates.
Ed Warnickecb9cada2015-12-08 15:45:58 -07003 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * ethernet_node.c: ethernet packet processing
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vlib/vlib.h>
41#include <vnet/pg/pg.h>
42#include <vnet/ethernet/ethernet.h>
Pavel Kotucek15ac81c2017-06-20 14:00:26 +020043#include <vnet/ethernet/p2p_ethernet.h>
Neale Ranns17ff3c12018-07-04 10:24:24 -070044#include <vnet/devices/pipe/pipe.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045#include <vppinfra/sparse_vec.h>
46#include <vnet/l2/l2_bvi.h>
Dave Barach9137e542019-09-13 17:47:50 -040047#include <vnet/classify/trace_classify.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070048
Ed Warnickecb9cada2015-12-08 15:45:58 -070049#define foreach_ethernet_input_next \
50 _ (PUNT, "error-punt") \
51 _ (DROP, "error-drop") \
Damjan Marion650223c2018-11-14 16:55:53 +010052 _ (LLC, "llc-input") \
53 _ (IP4_INPUT, "ip4-input") \
54 _ (IP4_INPUT_NCS, "ip4-input-no-checksum")
Ed Warnickecb9cada2015-12-08 15:45:58 -070055
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070056typedef enum
57{
Ed Warnickecb9cada2015-12-08 15:45:58 -070058#define _(s,n) ETHERNET_INPUT_NEXT_##s,
59 foreach_ethernet_input_next
60#undef _
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070061 ETHERNET_INPUT_N_NEXT,
Ed Warnickecb9cada2015-12-08 15:45:58 -070062} ethernet_input_next_t;
63
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070064typedef struct
65{
Ed Warnickecb9cada2015-12-08 15:45:58 -070066 u8 packet_data[32];
Damjan Marion650223c2018-11-14 16:55:53 +010067 u16 frame_flags;
68 ethernet_input_frame_t frame_data;
Ed Warnickecb9cada2015-12-08 15:45:58 -070069} ethernet_input_trace_t;
70
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070071static u8 *
72format_ethernet_input_trace (u8 * s, va_list * va)
Ed Warnickecb9cada2015-12-08 15:45:58 -070073{
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070076 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
Damjan Marion650223c2018-11-14 16:55:53 +010077 u32 indent = format_get_indent (s);
Ed Warnickecb9cada2015-12-08 15:45:58 -070078
Damjan Marion650223c2018-11-14 16:55:53 +010079 if (t->frame_flags)
80 {
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
86 }
Ed Warnickecb9cada2015-12-08 15:45:58 -070087 s = format (s, "%U", format_ethernet_header, t->packet_data);
88
89 return s;
90}
91
Damjan Marione849da22018-09-12 13:32:01 +020092extern vlib_node_registration_t ethernet_input_node;
Ed Warnickecb9cada2015-12-08 15:45:58 -070093
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -070094typedef enum
95{
Ed Warnickecb9cada2015-12-08 15:45:58 -070096 ETHERNET_INPUT_VARIANT_ETHERNET,
97 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
Ed Warnickecb9cada2015-12-08 15:45:58 -070098 ETHERNET_INPUT_VARIANT_NOT_L2,
99} ethernet_input_variant_t;
100
101
Ed Warnickecb9cada2015-12-08 15:45:58 -0700102// Parse the ethernet header to extract vlan tags and innermost ethertype
103static_always_inline void
104parse_header (ethernet_input_variant_t variant,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700105 vlib_buffer_t * b0,
106 u16 * type,
107 u16 * orig_type,
108 u16 * outer_id, u16 * inner_id, u32 * match_flags)
109{
Chris Luke194ebc52016-04-25 14:26:55 -0400110 u8 vlan_count;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700111
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700112 if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
113 || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
114 {
115 ethernet_header_t *e0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700116
Zhiyong Yang9f833582020-04-11 14:36:55 +0000117 e0 = vlib_buffer_get_current (b0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700118
Damjan Marion072401e2017-07-13 18:53:27 +0200119 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
Steven35de3b32017-12-03 23:40:54 -0800120 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700121
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700122 vlib_buffer_advance (b0, sizeof (e0[0]));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700123
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700124 *type = clib_net_to_host_u16 (e0->type);
125 }
126 else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
127 {
128 // here when prior node was LLC/SNAP processing
129 u16 *e0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700130
Zhiyong Yang9f833582020-04-11 14:36:55 +0000131 e0 = vlib_buffer_get_current (b0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700132
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700133 vlib_buffer_advance (b0, sizeof (e0[0]));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700134
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700135 *type = clib_net_to_host_u16 (e0[0]);
136 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700137
138 // save for distinguishing between dot1q and dot1ad later
139 *orig_type = *type;
140
141 // default the tags to 0 (used if there is no corresponding tag)
142 *outer_id = 0;
143 *inner_id = 0;
144
145 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
Chris Luke194ebc52016-04-25 14:26:55 -0400146 vlan_count = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700147
148 // check for vlan encaps
Damjan Marionb94bdad2016-09-19 11:32:03 +0200149 if (ethernet_frame_is_tagged (*type))
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700150 {
151 ethernet_vlan_header_t *h0;
152 u16 tag;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700153
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700154 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700155
Zhiyong Yang9f833582020-04-11 14:36:55 +0000156 h0 = vlib_buffer_get_current (b0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700157
158 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
159
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700160 *outer_id = tag & 0xfff;
Neale Ranns30d0fd42017-05-30 07:30:04 -0700161 if (0 == *outer_id)
162 *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700163
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700164 *type = clib_net_to_host_u16 (h0->type);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700165
166 vlib_buffer_advance (b0, sizeof (h0[0]));
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700167 vlan_count = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700168
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700169 if (*type == ETHERNET_TYPE_VLAN)
170 {
171 // Double tagged packet
172 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
173
Zhiyong Yang9f833582020-04-11 14:36:55 +0000174 h0 = vlib_buffer_get_current (b0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700175
176 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
177
178 *inner_id = tag & 0xfff;
179
180 *type = clib_net_to_host_u16 (h0->type);
181
182 vlib_buffer_advance (b0, sizeof (h0[0]));
183 vlan_count = 2;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700184 if (*type == ETHERNET_TYPE_VLAN)
185 {
186 // More than double tagged packet
187 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
Eyal Bari6f7ebf92017-06-13 12:09:37 +0300188
189 vlib_buffer_advance (b0, sizeof (h0[0]));
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700190 vlan_count = 3; // "unknown" number, aka, 3-or-more
191 }
192 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700193 }
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700194 ethernet_buffer_set_vlan_count (b0, vlan_count);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700195}
196
Matthew Smith42bde452019-11-18 09:35:24 -0600197static_always_inline void
198ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
199 u64 * dmacs, u8 * dmacs_bad,
200 u32 n_packets, ethernet_interface_t * ei,
201 u8 have_sec_dmac);
202
Ed Warnickecb9cada2015-12-08 15:45:58 -0700203// Determine the subinterface for this packet, given the result of the
204// vlan table lookups and vlan header parsing. Check the most specific
205// matches first.
206static_always_inline void
207identify_subint (vnet_hw_interface_t * hi,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700208 vlib_buffer_t * b0,
209 u32 match_flags,
210 main_intf_t * main_intf,
211 vlan_intf_t * vlan_intf,
212 qinq_intf_t * qinq_intf,
213 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700214{
215 u32 matched;
216
Damjan Marionddf6e082018-11-26 16:05:07 +0100217 matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
218 qinq_intf, new_sw_if_index, error0, is_l2);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700219
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700220 if (matched)
221 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700222 // Perform L3 my-mac filter
223 // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
224 // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
225 if (!(*is_l2))
226 {
Matthew Smith42bde452019-11-18 09:35:24 -0600227 u64 dmacs[2];
228 u8 dmacs_bad[2];
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700229 ethernet_header_t *e0;
Matthew Smith42bde452019-11-18 09:35:24 -0600230 ethernet_interface_t *ei0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700231
Matthew Smith42bde452019-11-18 09:35:24 -0600232 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
233 dmacs[0] = *(u64 *) e0;
234 ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);
235
236 if (ei0 && vec_len (ei0->secondary_addrs))
237 ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
238 1 /* n_packets */ , ei0,
239 1 /* have_sec_dmac */ );
240 else
241 ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
242 1 /* n_packets */ , ei0,
243 0 /* have_sec_dmac */ );
244
245 if (dmacs_bad[0])
246 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700247 }
248
249 // Check for down subinterface
250 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700252}
253
254static_always_inline void
255determine_next_node (ethernet_main_t * em,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700256 ethernet_input_variant_t variant,
257 u32 is_l20,
258 u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700259{
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +0200260 vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
261 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
262
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700263 if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
264 {
265 // some error occurred
266 *next0 = ETHERNET_INPUT_NEXT_DROP;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700267 }
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700268 else if (is_l20)
269 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700270 // record the L2 len and reset the buffer so the L2 header is preserved
John Lob14826e2018-04-18 15:52:23 -0400271 u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
272 vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
273 *next0 = em->l2_next;
Eyal Bari6f7ebf92017-06-13 12:09:37 +0300274 ASSERT (vnet_buffer (b0)->l2.l2_len ==
275 ethernet_buffer_header_size (b0));
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +0200276 vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700277
278 // check for common IP/MPLS ethertypes
279 }
280 else if (type0 == ETHERNET_TYPE_IP4)
281 {
282 *next0 = em->l3_next.input_next_ip4;
283 }
284 else if (type0 == ETHERNET_TYPE_IP6)
285 {
286 *next0 = em->l3_next.input_next_ip6;
287 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800288 else if (type0 == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700289 {
290 *next0 = em->l3_next.input_next_mpls;
291
292 }
293 else if (em->redirect_l3)
294 {
295 // L3 Redirect is on, the cached common next nodes will be
296 // pointing to the redirect node, catch the uncommon types here
297 *next0 = em->redirect_l3_next;
298 }
299 else
300 {
301 // uncommon ethertype, check table
302 u32 i0;
303 i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
304 *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
305 *error0 =
306 i0 ==
307 SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
308
309 // The table is not populated with LLC values, so check that now.
Damjan Marion607de1a2016-08-16 22:53:54 +0200310 // If variant is variant_ethernet then we came from LLC processing. Don't
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -0700311 // go back there; drop instead using by keeping the drop/bad table result.
312 if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
313 {
314 *next0 = ETHERNET_INPUT_NEXT_LLC;
315 }
316 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700317}
318
Damjan Marion650223c2018-11-14 16:55:53 +0100319
320/* following vector code relies on following assumptions */
321STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
322STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
323STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
324STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
325 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
326 "l3_hdr_offset must follow l2_hdr_offset");
327
328static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100329eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
Damjan Marion650223c2018-11-14 16:55:53 +0100330{
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100331 i16 adv = sizeof (ethernet_header_t);
332 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
333 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
334
Damjan Marion650223c2018-11-14 16:55:53 +0100335#ifdef CLIB_HAVE_VEC256
336 /* to reduce number of small loads/stores we are loading first 64 bits
337 of each buffer metadata into 256-bit register so we can advance
338 current_data, current_length and flags.
339 Observed saving of this code is ~2 clocks per packet */
340 u64x4 r, radv;
341
342 /* vector if signed 16 bit integers used in signed vector add operation
343 to advnce current_data and current_length */
344 u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
345 i16x16 adv4 = {
346 adv, -adv, 0, 0, adv, -adv, 0, 0,
347 adv, -adv, 0, 0, adv, -adv, 0, 0
348 };
349
350 /* load 4 x 64 bits */
351 r = u64x4_gather (b[0], b[1], b[2], b[3]);
352
353 /* set flags */
354 r |= (u64x4) flags4;
355
356 /* advance buffer */
357 radv = (u64x4) ((i16x16) r + adv4);
358
359 /* write 4 x 64 bits */
360 u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
361
362 /* use old current_data as l2_hdr_offset and new current_data as
363 l3_hdr_offset */
364 r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
365
366 /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
367 u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
368 u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
369 u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
370 u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
371
Damjan Marione9cebdf2018-11-21 00:47:42 +0100372 if (is_l3)
373 {
374 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
375 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
376 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
377 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
Damjan Marion650223c2018-11-14 16:55:53 +0100378
Damjan Marione9cebdf2018-11-21 00:47:42 +0100379 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
380 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
381 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
382 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
383 }
384 else
385 {
386 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
387 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
388 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
389 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
390
391 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
392 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
393 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
394 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
395 }
Damjan Marion650223c2018-11-14 16:55:53 +0100396
397#else
398 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
399 vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
400 vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
401 vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
402 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
403 vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
404 vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
405 vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
406
407 if (is_l3)
408 {
409 vlib_buffer_advance (b[0], adv);
410 vlib_buffer_advance (b[1], adv);
411 vlib_buffer_advance (b[2], adv);
412 vlib_buffer_advance (b[3], adv);
413 }
414
415 b[0]->flags |= flags;
416 b[1]->flags |= flags;
417 b[2]->flags |= flags;
418 b[3]->flags |= flags;
419#endif
420
421 if (!is_l3)
422 {
423 vnet_buffer (b[0])->l2.l2_len = adv;
424 vnet_buffer (b[1])->l2.l2_len = adv;
425 vnet_buffer (b[2])->l2.l2_len = adv;
426 vnet_buffer (b[3])->l2.l2_len = adv;
427 }
428}
429
430static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100431eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
Damjan Marion650223c2018-11-14 16:55:53 +0100432{
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100433 i16 adv = sizeof (ethernet_header_t);
434 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
435 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
436
Damjan Marion650223c2018-11-14 16:55:53 +0100437 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
438 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
439
440 if (is_l3)
441 vlib_buffer_advance (b[0], adv);
442 b[0]->flags |= flags;
443 if (!is_l3)
444 vnet_buffer (b[0])->l2.l2_len = adv;
445}
446
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100447
Damjan Marion650223c2018-11-14 16:55:53 +0100448static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100449eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
450 u64 * dmacs, int offset, int dmac_check)
Damjan Marion650223c2018-11-14 16:55:53 +0100451{
Damjan Marion650223c2018-11-14 16:55:53 +0100452 ethernet_header_t *e;
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100453 e = vlib_buffer_get_current (b[offset]);
454#ifdef CLIB_HAVE_VEC128
455 u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
456 etype[offset] = ((u16x8) r)[3];
457 tags[offset] = r[1];
458#else
459 etype[offset] = e->type;
460 tags[offset] = *(u64 *) (e + 1);
461#endif
Damjan Marion650223c2018-11-14 16:55:53 +0100462
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100463 if (dmac_check)
464 dmacs[offset] = *(u64 *) e;
465}
Damjan Marion650223c2018-11-14 16:55:53 +0100466
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100467static_always_inline u16
468eth_input_next_by_type (u16 etype)
469{
470 ethernet_main_t *em = &ethernet_main;
471
472 return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
473 vec_elt (em->l3_next.input_next_by_type,
474 sparse_vec_index (em->l3_next.input_next_by_type, etype));
475}
476
477typedef struct
478{
479 u64 tag, mask;
480 u32 sw_if_index;
481 u16 type, len, next;
482 i16 adv;
483 u8 err, n_tags;
484 u64 n_packets, n_bytes;
485} eth_input_tag_lookup_t;
486
487static_always_inline void
488eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
489 eth_input_tag_lookup_t * l)
490{
491 if (l->n_packets == 0 || l->sw_if_index == ~0)
492 return;
493
494 if (l->adv > 0)
495 l->n_bytes += l->n_packets * l->len;
496
497 vlib_increment_combined_counter
498 (vnm->interface_main.combined_sw_if_counters +
499 VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
500 l->n_packets, l->n_bytes);
501}
502
503static_always_inline void
504eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
505 vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
506 u64 tag, u16 * next, vlib_buffer_t * b,
507 eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
508 int main_is_l3, int check_dmac)
509{
510 ethernet_main_t *em = &ethernet_main;
511
512 if ((tag ^ l->tag) & l->mask)
Damjan Marion650223c2018-11-14 16:55:53 +0100513 {
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100514 main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
515 vlan_intf_t *vif;
516 qinq_intf_t *qif;
517 vlan_table_t *vlan_table;
518 qinq_table_t *qinq_table;
519 u16 *t = (u16 *) & tag;
520 u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
521 u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
522 u32 matched, is_l2, new_sw_if_index;
523
524 vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
525 mif->dot1ad_vlans : mif->dot1q_vlans);
526 vif = &vlan_table->vlans[vlan1];
527 qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
528 qif = &qinq_table->vlans[vlan2];
529 l->err = ETHERNET_ERROR_NONE;
530 l->type = clib_net_to_host_u16 (t[1]);
531
532 if (l->type == ETHERNET_TYPE_VLAN)
533 {
534 l->type = clib_net_to_host_u16 (t[3]);
535 l->n_tags = 2;
536 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
537 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
538 qif, &new_sw_if_index, &l->err,
539 &is_l2);
540 }
541 else
542 {
543 l->n_tags = 1;
544 if (vlan1 == 0)
545 {
546 new_sw_if_index = hi->sw_if_index;
547 l->err = ETHERNET_ERROR_NONE;
548 matched = 1;
549 is_l2 = main_is_l3 == 0;
550 }
551 else
552 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
553 SUBINT_CONFIG_MATCH_1_TAG, mif,
554 vif, qif, &new_sw_if_index,
555 &l->err, &is_l2);
556 }
557
558 if (l->sw_if_index != new_sw_if_index)
559 {
560 eth_input_update_if_counters (vm, vnm, l);
561 l->n_packets = 0;
562 l->n_bytes = 0;
563 l->sw_if_index = new_sw_if_index;
564 }
565 l->tag = tag;
566 l->mask = (l->n_tags == 2) ?
567 clib_net_to_host_u64 (0xffffffffffffffff) :
568 clib_net_to_host_u64 (0xffffffff00000000);
569
570 if (matched && l->sw_if_index == ~0)
571 l->err = ETHERNET_ERROR_DOWN;
572
573 l->len = sizeof (ethernet_header_t) +
574 l->n_tags * sizeof (ethernet_vlan_header_t);
575 if (main_is_l3)
576 l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
577 l->n_tags * sizeof (ethernet_vlan_header_t);
578 else
579 l->adv = is_l2 ? 0 : l->len;
580
581 if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
582 l->next = ETHERNET_INPUT_NEXT_DROP;
583 else if (is_l2)
584 l->next = em->l2_next;
585 else if (l->type == ETHERNET_TYPE_IP4)
586 l->next = em->l3_next.input_next_ip4;
587 else if (l->type == ETHERNET_TYPE_IP6)
588 l->next = em->l3_next.input_next_ip6;
589 else if (l->type == ETHERNET_TYPE_MPLS)
590 l->next = em->l3_next.input_next_mpls;
591 else if (em->redirect_l3)
592 l->next = em->redirect_l3_next;
593 else
594 {
595 l->next = eth_input_next_by_type (l->type);
596 if (l->next == ETHERNET_INPUT_NEXT_PUNT)
597 l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
598 }
599 }
600
601 if (check_dmac && l->adv > 0 && dmac_bad)
602 {
603 l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
604 next[0] = ETHERNET_INPUT_NEXT_PUNT;
605 }
606 else
607 next[0] = l->next;
608
609 vlib_buffer_advance (b, l->adv);
610 vnet_buffer (b)->l2.l2_len = l->len;
611 vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;
612
613 if (l->err == ETHERNET_ERROR_NONE)
614 {
615 vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
616 ethernet_buffer_set_vlan_count (b, l->n_tags);
617 }
618 else
619 b->error = node->errors[l->err];
620
621 /* update counters */
622 l->n_packets += 1;
623 l->n_bytes += vlib_buffer_length_in_chain (vm, b);
624}
625
Matthew G Smithd459bf32019-09-04 15:01:04 -0500626#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
627#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
628
629#ifdef CLIB_HAVE_VEC256
630static_always_inline u32
631is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
632{
633 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
634 r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
635 return u8x32_msb_mask ((u8x32) (r0));
636}
Matthew Smith42bde452019-11-18 09:35:24 -0600637#endif
638
Matthew G Smithd459bf32019-09-04 15:01:04 -0500639static_always_inline u8
640is_dmac_bad (u64 dmac, u64 hwaddr)
641{
642 u64 r0 = dmac & DMAC_MASK;
643 return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
644}
Matthew G Smithd459bf32019-09-04 15:01:04 -0500645
646static_always_inline u8
647is_sec_dmac_bad (u64 dmac, u64 hwaddr)
648{
649 return ((dmac & DMAC_MASK) != hwaddr);
650}
651
652#ifdef CLIB_HAVE_VEC256
653static_always_inline u32
654is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
655{
656 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
657 r0 = (r0 != u64x4_splat (hwaddr));
658 return u8x32_msb_mask ((u8x32) (r0));
659}
660#endif
661
662static_always_inline u8
663eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
664{
665 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
666 return dmac_bad[0];
667}
668
669static_always_inline u32
670eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
671{
672#ifdef CLIB_HAVE_VEC256
673 *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
674#else
675 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
676 dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
677 dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
678 dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
679#endif
680 return *(u32 *) dmac_bad;
681}
682
Matthew Smith42bde452019-11-18 09:35:24 -0600683/*
684 * DMAC check for ethernet_input_inline()
685 *
686 * dmacs and dmacs_bad are arrays that are 2 elements long
687 * n_packets should be 1 or 2 for ethernet_input_inline()
688 */
689static_always_inline void
690ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
691 u64 * dmacs, u8 * dmacs_bad,
692 u32 n_packets, ethernet_interface_t * ei,
693 u8 have_sec_dmac)
694{
695 u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
696 u8 bad = 0;
697
698 dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
699 dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));
700
701 bad = dmacs_bad[0] | dmacs_bad[1];
702
703 if (PREDICT_FALSE (bad && have_sec_dmac))
704 {
705 mac_address_t *sec_addr;
706
707 vec_foreach (sec_addr, ei->secondary_addrs)
708 {
709 hwaddr = (*(u64 *) sec_addr) & DMAC_MASK;
710
711 bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
712 eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
713 dmacs_bad + 1));
714
715 if (!bad)
716 return;
717 }
718 }
719}
720
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500721static_always_inline void
722eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
723 u64 * dmacs, u8 * dmacs_bad,
Matthew G Smithd459bf32019-09-04 15:01:04 -0500724 u32 n_packets, ethernet_interface_t * ei,
725 u8 have_sec_dmac)
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500726{
Matthew G Smithd459bf32019-09-04 15:01:04 -0500727 u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500728 u64 *dmac = dmacs;
729 u8 *dmac_bad = dmacs_bad;
Matthew G Smithd459bf32019-09-04 15:01:04 -0500730 u32 bad = 0;
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500731 i32 n_left = n_packets;
732
733#ifdef CLIB_HAVE_VEC256
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500734 while (n_left > 0)
735 {
Matthew G Smithd459bf32019-09-04 15:01:04 -0500736 bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
737 bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500738
739 /* next */
740 dmac += 8;
741 dmac_bad += 8;
742 n_left -= 8;
743 }
744#else
745 while (n_left > 0)
746 {
Matthew G Smithd459bf32019-09-04 15:01:04 -0500747 bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
748 bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
749 bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
750 bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500751
752 /* next */
753 dmac += 4;
754 dmac_bad += 4;
755 n_left -= 4;
756 }
757#endif
Matthew G Smithd459bf32019-09-04 15:01:04 -0500758
759 if (have_sec_dmac && bad)
760 {
761 mac_address_t *addr;
762
763 vec_foreach (addr, ei->secondary_addrs)
764 {
765 u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
766 i32 n_left = n_packets;
767 u64 *dmac = dmacs;
768 u8 *dmac_bad = dmacs_bad;
769
770 bad = 0;
771
772 while (n_left > 0)
773 {
774 int adv = 0;
775 int n_bad;
776
777 /* skip any that have already matched */
778 if (!dmac_bad[0])
779 {
780 dmac += 1;
781 dmac_bad += 1;
782 n_left -= 1;
783 continue;
784 }
785
786 n_bad = clib_min (4, n_left);
787
788 /* If >= 4 left, compare 4 together */
789 if (n_bad == 4)
790 {
791 bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
792 adv = 4;
793 n_bad = 0;
794 }
795
796 /* handle individually */
797 while (n_bad > 0)
798 {
799 bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
800 dmac_bad + adv);
801 adv += 1;
802 n_bad -= 1;
803 }
804
805 dmac += adv;
806 dmac_bad += adv;
807 n_left -= adv;
808 }
809
810 if (!bad) /* can stop looping if everything matched */
811 break;
812 }
813 }
Matthew Smithfa20d4c2019-07-12 11:48:24 -0500814}
815
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100816/* process frame of buffers, store ethertype into array and update
817 buffer metadata fields depending on interface being l2 or l3 assuming that
818 packets are untagged. For tagged packets those fields are updated later.
819 Optionally store Destionation MAC address and tag data into arrays
820 for further processing */
821
822STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
823 "VLIB_FRAME_SIZE must be power of 8");
824static_always_inline void
825eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
826 vnet_hw_interface_t * hi,
827 u32 * buffer_indices, u32 n_packets, int main_is_l3,
828 int ip4_cksum_ok, int dmac_check)
829{
830 ethernet_main_t *em = &ethernet_main;
831 u16 nexts[VLIB_FRAME_SIZE], *next;
832 u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
833 u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
834 u8 dmacs_bad[VLIB_FRAME_SIZE];
835 u64 tags[VLIB_FRAME_SIZE], *tag = tags;
836 u16 slowpath_indices[VLIB_FRAME_SIZE];
837 u16 n_slowpath, i;
838 u16 next_ip4, next_ip6, next_mpls, next_l2;
839 u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
840 u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
841 u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
842 u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
843 u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
844 i32 n_left = n_packets;
Zhiyong Yang70312882020-03-27 17:12:35 +0000845 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
846 vlib_buffer_t **b = bufs;
Matthew G Smithd459bf32019-09-04 15:01:04 -0500847 ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100848
Zhiyong Yang70312882020-03-27 17:12:35 +0000849 vlib_get_buffers (vm, buffer_indices, b, n_left);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100850
851 while (n_left >= 20)
852 {
853 vlib_buffer_t **ph = b + 16, **pd = b + 8;
Damjan Marion650223c2018-11-14 16:55:53 +0100854
855 vlib_prefetch_buffer_header (ph[0], LOAD);
856 vlib_prefetch_buffer_data (pd[0], LOAD);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100857 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
Damjan Marion650223c2018-11-14 16:55:53 +0100858
859 vlib_prefetch_buffer_header (ph[1], LOAD);
860 vlib_prefetch_buffer_data (pd[1], LOAD);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100861 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
Damjan Marion650223c2018-11-14 16:55:53 +0100862
863 vlib_prefetch_buffer_header (ph[2], LOAD);
864 vlib_prefetch_buffer_data (pd[2], LOAD);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100865 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
Damjan Marion650223c2018-11-14 16:55:53 +0100866
867 vlib_prefetch_buffer_header (ph[3], LOAD);
868 vlib_prefetch_buffer_data (pd[3], LOAD);
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100869 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
Damjan Marion650223c2018-11-14 16:55:53 +0100870
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100871 eth_input_adv_and_flags_x4 (b, main_is_l3);
Damjan Marion650223c2018-11-14 16:55:53 +0100872
873 /* next */
Zhiyong Yang70312882020-03-27 17:12:35 +0000874 b += 4;
Damjan Marion650223c2018-11-14 16:55:53 +0100875 n_left -= 4;
876 etype += 4;
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100877 tag += 4;
878 dmac += 4;
Damjan Marion650223c2018-11-14 16:55:53 +0100879 }
880 while (n_left >= 4)
881 {
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100882 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
883 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
884 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
885 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
886 eth_input_adv_and_flags_x4 (b, main_is_l3);
Damjan Marion650223c2018-11-14 16:55:53 +0100887
888 /* next */
Zhiyong Yang70312882020-03-27 17:12:35 +0000889 b += 4;
Damjan Marion650223c2018-11-14 16:55:53 +0100890 n_left -= 4;
891 etype += 4;
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100892 tag += 4;
893 dmac += 4;
Damjan Marion650223c2018-11-14 16:55:53 +0100894 }
895 while (n_left)
896 {
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100897 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
898 eth_input_adv_and_flags_x1 (b, main_is_l3);
Damjan Marion650223c2018-11-14 16:55:53 +0100899
900 /* next */
Zhiyong Yang70312882020-03-27 17:12:35 +0000901 b += 1;
Damjan Marion650223c2018-11-14 16:55:53 +0100902 n_left -= 1;
903 etype += 1;
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100904 tag += 1;
Zhiyong Yangcbe36e42020-05-09 07:13:34 +0000905 dmac += 1;
Damjan Marion650223c2018-11-14 16:55:53 +0100906 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100907
908 if (dmac_check)
Matthew G Smithd459bf32019-09-04 15:01:04 -0500909 {
Matthew Smith49389382019-10-02 16:34:27 -0500910 if (ei && vec_len (ei->secondary_addrs))
Matthew G Smithd459bf32019-09-04 15:01:04 -0500911 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
912 ei, 1 /* have_sec_dmac */ );
913 else
914 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
915 ei, 0 /* have_sec_dmac */ );
916 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +0100917
918 next_ip4 = em->l3_next.input_next_ip4;
919 next_ip6 = em->l3_next.input_next_ip6;
920 next_mpls = em->l3_next.input_next_mpls;
921 next_l2 = em->l2_next;
922
923 if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
924 next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
925
926#ifdef CLIB_HAVE_VEC256
927 u16x16 et16_ip4 = u16x16_splat (et_ip4);
928 u16x16 et16_ip6 = u16x16_splat (et_ip6);
929 u16x16 et16_mpls = u16x16_splat (et_mpls);
930 u16x16 et16_vlan = u16x16_splat (et_vlan);
931 u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
932 u16x16 next16_ip4 = u16x16_splat (next_ip4);
933 u16x16 next16_ip6 = u16x16_splat (next_ip6);
934 u16x16 next16_mpls = u16x16_splat (next_mpls);
935 u16x16 next16_l2 = u16x16_splat (next_l2);
936 u16x16 zero = { 0 };
937 u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
938#endif
939
940 etype = etypes;
941 n_left = n_packets;
942 next = nexts;
943 n_slowpath = 0;
944 i = 0;
945
946 /* fastpath - in l3 mode hadles ip4, ip6 and mpls packets, other packets
947 are considered as slowpath, in l2 mode all untagged packets are
948 considered as fastpath */
949 while (n_left > 0)
950 {
951#ifdef CLIB_HAVE_VEC256
952 if (n_left >= 16)
953 {
954 u16x16 r = zero;
955 u16x16 e16 = u16x16_load_unaligned (etype);
956 if (main_is_l3)
957 {
958 r += (e16 == et16_ip4) & next16_ip4;
959 r += (e16 == et16_ip6) & next16_ip6;
960 r += (e16 == et16_mpls) & next16_mpls;
961 }
962 else
963 r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
964 u16x16_store_unaligned (r, next);
965
966 if (!u16x16_is_all_zero (r == zero))
967 {
968 if (u16x16_is_all_zero (r))
969 {
970 u16x16_store_unaligned (u16x16_splat (i) + stairs,
971 slowpath_indices + n_slowpath);
972 n_slowpath += 16;
973 }
974 else
975 {
976 for (int j = 0; j < 16; j++)
977 if (next[j] == 0)
978 slowpath_indices[n_slowpath++] = i + j;
979 }
980 }
981
982 etype += 16;
983 next += 16;
984 n_left -= 16;
985 i += 16;
986 continue;
987 }
988#endif
989 if (main_is_l3 && etype[0] == et_ip4)
990 next[0] = next_ip4;
991 else if (main_is_l3 && etype[0] == et_ip6)
992 next[0] = next_ip6;
993 else if (main_is_l3 && etype[0] == et_mpls)
994 next[0] = next_mpls;
995 else if (main_is_l3 == 0 &&
996 etype[0] != et_vlan && etype[0] != et_dot1ad)
997 next[0] = next_l2;
998 else
999 {
1000 next[0] = 0;
1001 slowpath_indices[n_slowpath++] = i;
1002 }
1003
1004 etype += 1;
1005 next += 1;
1006 n_left -= 1;
1007 i += 1;
1008 }
1009
1010 if (n_slowpath)
1011 {
1012 vnet_main_t *vnm = vnet_get_main ();
1013 n_left = n_slowpath;
1014 u16 *si = slowpath_indices;
1015 u32 last_unknown_etype = ~0;
1016 u32 last_unknown_next = ~0;
1017 eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
1018 .mask = -1LL,
1019 .tag = tags[si[0]] ^ -1LL,
1020 .sw_if_index = ~0
1021 };
1022
1023 clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));
1024
1025 while (n_left)
1026 {
1027 i = si[0];
1028 u16 etype = etypes[i];
1029
1030 if (etype == et_vlan)
1031 {
1032 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
1033 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
1034 &dot1q_lookup, dmacs_bad[i], 0,
1035 main_is_l3, dmac_check);
1036
1037 }
1038 else if (etype == et_dot1ad)
1039 {
1040 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
1041 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
1042 &dot1ad_lookup, dmacs_bad[i], 1,
1043 main_is_l3, dmac_check);
1044 }
1045 else
1046 {
1047 /* untagged packet with not well known etyertype */
1048 if (last_unknown_etype != etype)
1049 {
1050 last_unknown_etype = etype;
1051 etype = clib_host_to_net_u16 (etype);
1052 last_unknown_next = eth_input_next_by_type (etype);
1053 }
1054 if (dmac_check && main_is_l3 && dmacs_bad[i])
1055 {
1056 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
1057 b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
1058 nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
1059 }
1060 else
1061 nexts[i] = last_unknown_next;
1062 }
1063
1064 /* next */
1065 n_left--;
1066 si++;
1067 }
1068
1069 eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
1070 eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
1071 }
1072
1073 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
Damjan Marion650223c2018-11-14 16:55:53 +01001074}
1075
1076static_always_inline void
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001077eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
1078 vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
1079 int ip4_cksum_ok)
Damjan Marion650223c2018-11-14 16:55:53 +01001080{
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001081 ethernet_main_t *em = &ethernet_main;
1082 ethernet_interface_t *ei;
1083 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1084 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1085 subint_config_t *subint0 = &intf0->untagged_subint;
Damjan Marion650223c2018-11-14 16:55:53 +01001086
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001087 int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
1088 int promisc = (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;
Damjan Marion650223c2018-11-14 16:55:53 +01001089
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001090 if (main_is_l3)
Damjan Marion650223c2018-11-14 16:55:53 +01001091 {
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001092 /* main interface is L3, we dont expect tagged packets and interface
1093 is not in promisc node, so we dont't need to check DMAC */
1094 int is_l3 = 1;
Damjan Marion650223c2018-11-14 16:55:53 +01001095
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001096 if (promisc == 0)
1097 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1098 ip4_cksum_ok, 0);
Damjan Marion650223c2018-11-14 16:55:53 +01001099 else
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001100 /* subinterfaces and promisc mode so DMAC check is needed */
1101 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1102 ip4_cksum_ok, 1);
1103 return;
Damjan Marion650223c2018-11-14 16:55:53 +01001104 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001105 else
Damjan Marion650223c2018-11-14 16:55:53 +01001106 {
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001107 /* untagged packets are treated as L2 */
1108 int is_l3 = 0;
1109 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1110 ip4_cksum_ok, 1);
1111 return;
Damjan Marion650223c2018-11-14 16:55:53 +01001112 }
1113}
1114
1115static_always_inline void
1116ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
1117 vlib_frame_t * from_frame)
1118{
1119 u32 *from, n_left;
Benoît Ganne98477922019-04-10 14:21:11 +02001120 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
Damjan Marion650223c2018-11-14 16:55:53 +01001121 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001122 from = vlib_frame_vector_args (from_frame);
1123 n_left = from_frame->n_vectors;
Damjan Marion650223c2018-11-14 16:55:53 +01001124
Dave Barach5ecd5a52019-02-25 15:27:28 -05001125 while (n_left)
Damjan Marion650223c2018-11-14 16:55:53 +01001126 {
Dave Barach5ecd5a52019-02-25 15:27:28 -05001127 ethernet_input_trace_t *t0;
1128 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
1129
1130 if (b0->flags & VLIB_BUFFER_IS_TRACED)
1131 {
1132 t0 = vlib_add_trace (vm, node, b0,
1133 sizeof (ethernet_input_trace_t));
1134 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
1135 sizeof (t0->packet_data));
1136 t0->frame_flags = from_frame->flags;
1137 clib_memcpy_fast (&t0->frame_data,
1138 vlib_frame_scalar_args (from_frame),
1139 sizeof (ethernet_input_frame_t));
1140 }
1141 from += 1;
1142 n_left -= 1;
Damjan Marion650223c2018-11-14 16:55:53 +01001143 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001144 }
1145
1146 /* rx pcap capture if enabled */
Dave Barach33909772019-09-23 10:27:27 -04001147 if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
Dave Barach5ecd5a52019-02-25 15:27:28 -05001148 {
1149 u32 bi0;
Dave Barach33909772019-09-23 10:27:27 -04001150 vnet_pcap_t *pp = &vlib_global_main.pcap;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001151
1152 from = vlib_frame_vector_args (from_frame);
1153 n_left = from_frame->n_vectors;
1154 while (n_left > 0)
1155 {
Dave Barach9137e542019-09-13 17:47:50 -04001156 int classify_filter_result;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001157 vlib_buffer_t *b0;
1158 bi0 = from[0];
1159 from++;
Dave Barach9137e542019-09-13 17:47:50 -04001160 n_left--;
Dave Barach5ecd5a52019-02-25 15:27:28 -05001161 b0 = vlib_get_buffer (vm, bi0);
Dave Barachf5667c32019-09-25 11:27:46 -04001162 if (pp->filter_classify_table_index != ~0)
Dave Barach9137e542019-09-13 17:47:50 -04001163 {
1164 classify_filter_result =
1165 vnet_is_packet_traced_inline
Dave Barachf5667c32019-09-25 11:27:46 -04001166 (b0, pp->filter_classify_table_index, 0 /* full classify */ );
Dave Barach9137e542019-09-13 17:47:50 -04001167 if (classify_filter_result)
Dave Barach33909772019-09-23 10:27:27 -04001168 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1169 pp->max_bytes_per_pkt);
Dave Barach9137e542019-09-13 17:47:50 -04001170 continue;
1171 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001172
Dave Barach33909772019-09-23 10:27:27 -04001173 if (pp->pcap_sw_if_index == 0 ||
1174 pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
Dave Barach5ecd5a52019-02-25 15:27:28 -05001175 {
Dave Barachd28437c2019-11-20 09:28:31 -05001176 vnet_main_t *vnm = vnet_get_main ();
1177 vnet_hw_interface_t *hi =
1178 vnet_get_sup_hw_interface
1179 (vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);
1180
1181 /* Capture pkt if not filtered, or if filter hits */
1182 if (hi->trace_classify_table_index == ~0 ||
1183 vnet_is_packet_traced_inline
1184 (b0, hi->trace_classify_table_index,
1185 0 /* full classify */ ))
1186 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1187 pp->max_bytes_per_pkt);
Dave Barach5ecd5a52019-02-25 15:27:28 -05001188 }
Dave Barach5ecd5a52019-02-25 15:27:28 -05001189 }
Damjan Marion650223c2018-11-14 16:55:53 +01001190 }
1191}
1192
1193static_always_inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001194ethernet_input_inline (vlib_main_t * vm,
1195 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001196 u32 * from, u32 n_packets,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001197 ethernet_input_variant_t variant)
1198{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001199 vnet_main_t *vnm = vnet_get_main ();
1200 ethernet_main_t *em = &ethernet_main;
1201 vlib_node_runtime_t *error_node;
Damjan Marion650223c2018-11-14 16:55:53 +01001202 u32 n_left_from, next_index, *to_next;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001203 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
Damjan Marion067cd622018-07-11 12:47:43 +02001204 u32 thread_index = vm->thread_index;
Dave Barachcfba1e22016-11-16 10:23:50 -05001205 u32 cached_sw_if_index = ~0;
1206 u32 cached_is_l2 = 0; /* shut up gcc */
John Lo1904c472017-03-10 17:15:22 -05001207 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
Matthew Smith42bde452019-11-18 09:35:24 -06001208 ethernet_interface_t *ei = NULL;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001209 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
1210 vlib_buffer_t **b = bufs;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001211
1212 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
1213 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
1214 else
1215 error_node = node;
1216
Damjan Marion650223c2018-11-14 16:55:53 +01001217 n_left_from = n_packets;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001218
1219 next_index = node->cached_next_index;
1220 stats_sw_if_index = node->runtime_data[0];
1221 stats_n_packets = stats_n_bytes = 0;
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001222 vlib_get_buffers (vm, from, bufs, n_left_from);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001223
1224 while (n_left_from > 0)
1225 {
1226 u32 n_left_to_next;
1227
1228 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1229
1230 while (n_left_from >= 4 && n_left_to_next >= 2)
1231 {
1232 u32 bi0, bi1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001233 vlib_buffer_t *b0, *b1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001234 u8 next0, next1, error0, error1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001235 u16 type0, orig_type0, type1, orig_type1;
1236 u16 outer_id0, inner_id0, outer_id1, inner_id1;
1237 u32 match_flags0, match_flags1;
1238 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
1239 new_sw_if_index1, len1;
1240 vnet_hw_interface_t *hi0, *hi1;
1241 main_intf_t *main_intf0, *main_intf1;
1242 vlan_intf_t *vlan_intf0, *vlan_intf1;
1243 qinq_intf_t *qinq_intf0, *qinq_intf1;
1244 u32 is_l20, is_l21;
Dave Barachcfba1e22016-11-16 10:23:50 -05001245 ethernet_header_t *e0, *e1;
Matthew Smith42bde452019-11-18 09:35:24 -06001246 u64 dmacs[2];
1247 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001248
1249 /* Prefetch next iteration. */
1250 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001251 vlib_prefetch_buffer_header (b[2], STORE);
1252 vlib_prefetch_buffer_header (b[3], STORE);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001253
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001254 CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
1255 CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001256 }
1257
1258 bi0 = from[0];
1259 bi1 = from[1];
1260 to_next[0] = bi0;
1261 to_next[1] = bi1;
1262 from += 2;
1263 to_next += 2;
1264 n_left_to_next -= 2;
1265 n_left_from -= 2;
1266
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001267 b0 = b[0];
1268 b1 = b[1];
1269 b += 2;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001270
1271 error0 = error1 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001272 e0 = vlib_buffer_get_current (b0);
1273 type0 = clib_net_to_host_u16 (e0->type);
1274 e1 = vlib_buffer_get_current (b1);
1275 type1 = clib_net_to_host_u16 (e1->type);
1276
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001277 /* Set the L2 header offset for all packets */
1278 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1279 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
1280 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1281 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1282
John Locc532852016-12-14 15:42:45 -05001283 /* Speed-path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001284 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
Damjan Marionc6969b52018-02-19 12:14:06 +01001285 && !ethernet_frame_is_any_tagged_x2 (type0,
1286 type1)))
Dave Barachcfba1e22016-11-16 10:23:50 -05001287 {
1288 main_intf_t *intf0;
1289 subint_config_t *subint0;
1290 u32 sw_if_index0, sw_if_index1;
1291
1292 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1293 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1294 is_l20 = cached_is_l2;
1295
1296 /* This is probably wholly unnecessary */
1297 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
1298 goto slowpath;
1299
John Lo1904c472017-03-10 17:15:22 -05001300 /* Now sw_if_index0 == sw_if_index1 */
Dave Barachcfba1e22016-11-16 10:23:50 -05001301 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1302 {
1303 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001304 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001305 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001306 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001307 subint0 = &intf0->untagged_subint;
1308 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1309 }
John Lo7714b302016-12-20 16:59:02 -05001310
Dave Barachcfba1e22016-11-16 10:23:50 -05001311 if (PREDICT_TRUE (is_l20 != 0))
1312 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001313 vnet_buffer (b0)->l3_hdr_offset =
1314 vnet_buffer (b0)->l2_hdr_offset +
1315 sizeof (ethernet_header_t);
1316 vnet_buffer (b1)->l3_hdr_offset =
1317 vnet_buffer (b1)->l2_hdr_offset +
1318 sizeof (ethernet_header_t);
1319 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1320 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001321 next0 = em->l2_next;
1322 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001323 next1 = em->l2_next;
1324 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001325 }
John Locc532852016-12-14 15:42:45 -05001326 else
1327 {
Matthew Smith42bde452019-11-18 09:35:24 -06001328 dmacs[0] = *(u64 *) e0;
1329 dmacs[1] = *(u64 *) e1;
1330
1331 if (ei && vec_len (ei->secondary_addrs))
1332 ethernet_input_inline_dmac_check (hi, dmacs,
1333 dmacs_bad,
1334 2 /* n_packets */ ,
1335 ei,
1336 1 /* have_sec_dmac */ );
1337 else
1338 ethernet_input_inline_dmac_check (hi, dmacs,
1339 dmacs_bad,
1340 2 /* n_packets */ ,
1341 ei,
1342 0 /* have_sec_dmac */ );
1343
1344 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001345 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001346 if (dmacs_bad[1])
John Lo1904c472017-03-10 17:15:22 -05001347 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001348
John Lob14826e2018-04-18 15:52:23 -04001349 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001350 determine_next_node (em, variant, 0, type0, b0,
1351 &error0, &next0);
John Lob14826e2018-04-18 15:52:23 -04001352 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001353 determine_next_node (em, variant, 0, type1, b1,
1354 &error1, &next1);
John Locc532852016-12-14 15:42:45 -05001355 }
1356 goto ship_it01;
Dave Barachcfba1e22016-11-16 10:23:50 -05001357 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001358
John Locc532852016-12-14 15:42:45 -05001359 /* Slow-path for the tagged case */
1360 slowpath:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001361 parse_header (variant,
1362 b0,
1363 &type0,
1364 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001365
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001366 parse_header (variant,
1367 b1,
1368 &type1,
1369 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001370
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001371 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1372 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001373
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001374 eth_vlan_table_lookups (em,
1375 vnm,
1376 old_sw_if_index0,
1377 orig_type0,
1378 outer_id0,
1379 inner_id0,
1380 &hi0,
1381 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001382
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001383 eth_vlan_table_lookups (em,
1384 vnm,
1385 old_sw_if_index1,
1386 orig_type1,
1387 outer_id1,
1388 inner_id1,
1389 &hi1,
1390 &main_intf1, &vlan_intf1, &qinq_intf1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001391
1392 identify_subint (hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001393 b0,
1394 match_flags0,
1395 main_intf0,
1396 vlan_intf0,
1397 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001398
1399 identify_subint (hi1,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001400 b1,
1401 match_flags1,
1402 main_intf1,
1403 vlan_intf1,
1404 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001405
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001406 // Save RX sw_if_index for later nodes
1407 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1408 error0 !=
1409 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1410 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1411 error1 !=
1412 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001413
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001414 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
1415 if (((new_sw_if_index0 != ~0)
1416 && (new_sw_if_index0 != old_sw_if_index0))
1417 || ((new_sw_if_index1 != ~0)
1418 && (new_sw_if_index1 != old_sw_if_index1)))
1419 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001420
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001421 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001422 - vnet_buffer (b0)->l2_hdr_offset;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001423 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001424 - vnet_buffer (b1)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001425
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001426 stats_n_packets += 2;
1427 stats_n_bytes += len0 + len1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001428
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001429 if (PREDICT_FALSE
1430 (!(new_sw_if_index0 == stats_sw_if_index
1431 && new_sw_if_index1 == stats_sw_if_index)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001432 {
1433 stats_n_packets -= 2;
1434 stats_n_bytes -= len0 + len1;
1435
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001436 if (new_sw_if_index0 != old_sw_if_index0
1437 && new_sw_if_index0 != ~0)
1438 vlib_increment_combined_counter (vnm->
1439 interface_main.combined_sw_if_counters
1440 +
1441 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001442 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001443 new_sw_if_index0, 1,
1444 len0);
1445 if (new_sw_if_index1 != old_sw_if_index1
1446 && new_sw_if_index1 != ~0)
1447 vlib_increment_combined_counter (vnm->
1448 interface_main.combined_sw_if_counters
1449 +
1450 VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001451 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001452 new_sw_if_index1, 1,
1453 len1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001454
1455 if (new_sw_if_index0 == new_sw_if_index1)
1456 {
1457 if (stats_n_packets > 0)
1458 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001459 vlib_increment_combined_counter
1460 (vnm->interface_main.combined_sw_if_counters
1461 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001462 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001463 stats_sw_if_index,
1464 stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001465 stats_n_packets = stats_n_bytes = 0;
1466 }
1467 stats_sw_if_index = new_sw_if_index0;
1468 }
1469 }
1470 }
1471
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001472 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1473 is_l20 = is_l21 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001474
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001475 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1476 &next0);
1477 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1478 &next1);
1479
John Lo1904c472017-03-10 17:15:22 -05001480 ship_it01:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001481 b0->error = error_node->errors[error0];
1482 b1->error = error_node->errors[error1];
1483
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001484 // verify speculative enqueue
1485 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1486 n_left_to_next, bi0, bi1, next0,
1487 next1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001488 }
1489
1490 while (n_left_from > 0 && n_left_to_next > 0)
1491 {
1492 u32 bi0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001493 vlib_buffer_t *b0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001494 u8 error0, next0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001495 u16 type0, orig_type0;
1496 u16 outer_id0, inner_id0;
1497 u32 match_flags0;
1498 u32 old_sw_if_index0, new_sw_if_index0, len0;
1499 vnet_hw_interface_t *hi0;
1500 main_intf_t *main_intf0;
1501 vlan_intf_t *vlan_intf0;
1502 qinq_intf_t *qinq_intf0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001503 ethernet_header_t *e0;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001504 u32 is_l20;
Matthew Smith42bde452019-11-18 09:35:24 -06001505 u64 dmacs[2];
1506 u8 dmacs_bad[2];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001507
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001508 // Prefetch next iteration
1509 if (n_left_from > 1)
1510 {
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001511 vlib_prefetch_buffer_header (b[1], STORE);
1512 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001513 }
1514
1515 bi0 = from[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001516 to_next[0] = bi0;
1517 from += 1;
1518 to_next += 1;
1519 n_left_from -= 1;
1520 n_left_to_next -= 1;
1521
Zhiyong Yangb3ca33f2019-04-24 04:13:27 -04001522 b0 = b[0];
1523 b += 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001524
1525 error0 = ETHERNET_ERROR_NONE;
Dave Barachcfba1e22016-11-16 10:23:50 -05001526 e0 = vlib_buffer_get_current (b0);
1527 type0 = clib_net_to_host_u16 (e0->type);
1528
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001529 /* Set the L2 header offset for all packets */
1530 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1531 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1532
John Locc532852016-12-14 15:42:45 -05001533	  /* Fast path for the untagged case */
Dave Barachcfba1e22016-11-16 10:23:50 -05001534 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1535 && !ethernet_frame_is_tagged (type0)))
1536 {
1537 main_intf_t *intf0;
1538 subint_config_t *subint0;
1539 u32 sw_if_index0;
1540
1541 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1542 is_l20 = cached_is_l2;
1543
1544 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1545 {
1546 cached_sw_if_index = sw_if_index0;
John Lo1904c472017-03-10 17:15:22 -05001547 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
Matthew Smith42bde452019-11-18 09:35:24 -06001548 ei = ethernet_get_interface (em, hi->hw_if_index);
John Lo1904c472017-03-10 17:15:22 -05001549 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
Dave Barachcfba1e22016-11-16 10:23:50 -05001550 subint0 = &intf0->untagged_subint;
1551 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1552 }
John Lo7714b302016-12-20 16:59:02 -05001553
John Lo7714b302016-12-20 16:59:02 -05001554
Dave Barachcfba1e22016-11-16 10:23:50 -05001555 if (PREDICT_TRUE (is_l20 != 0))
1556 {
Andrew Yourtchenko20e6d362018-10-05 20:36:03 +02001557 vnet_buffer (b0)->l3_hdr_offset =
1558 vnet_buffer (b0)->l2_hdr_offset +
1559 sizeof (ethernet_header_t);
1560 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
Dave Barachcfba1e22016-11-16 10:23:50 -05001561 next0 = em->l2_next;
1562 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
Dave Barachcfba1e22016-11-16 10:23:50 -05001563 }
John Locc532852016-12-14 15:42:45 -05001564 else
1565 {
Matthew Smith42bde452019-11-18 09:35:24 -06001566 dmacs[0] = *(u64 *) e0;
1567
1568 if (ei && vec_len (ei->secondary_addrs))
1569 ethernet_input_inline_dmac_check (hi, dmacs,
1570 dmacs_bad,
1571 1 /* n_packets */ ,
1572 ei,
1573 1 /* have_sec_dmac */ );
1574 else
1575 ethernet_input_inline_dmac_check (hi, dmacs,
1576 dmacs_bad,
1577 1 /* n_packets */ ,
1578 ei,
1579 0 /* have_sec_dmac */ );
1580
1581 if (dmacs_bad[0])
John Lo1904c472017-03-10 17:15:22 -05001582 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
Matthew Smith42bde452019-11-18 09:35:24 -06001583
Andrew Yourtchenkoe78bca12018-10-10 16:15:55 +02001584 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
John Locc532852016-12-14 15:42:45 -05001585 determine_next_node (em, variant, 0, type0, b0,
1586 &error0, &next0);
John Locc532852016-12-14 15:42:45 -05001587 }
1588 goto ship_it0;
Dave Barachcfba1e22016-11-16 10:23:50 -05001589 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001590
John Locc532852016-12-14 15:42:45 -05001591 /* Slow-path for the tagged case */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001592 parse_header (variant,
1593 b0,
1594 &type0,
1595 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001596
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001597 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001598
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001599 eth_vlan_table_lookups (em,
1600 vnm,
1601 old_sw_if_index0,
1602 orig_type0,
1603 outer_id0,
1604 inner_id0,
1605 &hi0,
1606 &main_intf0, &vlan_intf0, &qinq_intf0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001607
1608 identify_subint (hi0,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001609 b0,
1610 match_flags0,
1611 main_intf0,
1612 vlan_intf0,
1613 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001614
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001615 // Save RX sw_if_index for later nodes
1616 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1617 error0 !=
1618 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001619
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001620 // Increment subinterface stats
1621 // Note that interface-level counters have already been incremented
1622 // prior to calling this function. Thus only subinterface counters
1623 // are incremented here.
1624 //
Damjan Marion607de1a2016-08-16 22:53:54 +02001625 // Interface level counters include packets received on the main
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001626 // interface and all subinterfaces. Subinterface level counters
1627 // include only those packets received on that subinterface
Ed Warnickecb9cada2015-12-08 15:45:58 -07001628 // Increment stats if the subint is valid and it is not the main intf
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001629 if ((new_sw_if_index0 != ~0)
1630 && (new_sw_if_index0 != old_sw_if_index0))
1631 {
Ed Warnickecb9cada2015-12-08 15:45:58 -07001632
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001633 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
Damjan Marion072401e2017-07-13 18:53:27 +02001634 - vnet_buffer (b0)->l2_hdr_offset;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001635
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001636 stats_n_packets += 1;
1637 stats_n_bytes += len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001638
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001639 // Batch stat increments from the same subinterface so counters
Damjan Marion607de1a2016-08-16 22:53:54 +02001640 // don't need to be incremented for every packet.
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001641 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1642 {
1643 stats_n_packets -= 1;
1644 stats_n_bytes -= len0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001645
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001646 if (new_sw_if_index0 != ~0)
1647 vlib_increment_combined_counter
1648 (vnm->interface_main.combined_sw_if_counters
1649 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001650 thread_index, new_sw_if_index0, 1, len0);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001651 if (stats_n_packets > 0)
1652 {
1653 vlib_increment_combined_counter
1654 (vnm->interface_main.combined_sw_if_counters
1655 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001656 thread_index,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001657 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1658 stats_n_packets = stats_n_bytes = 0;
1659 }
1660 stats_sw_if_index = new_sw_if_index0;
1661 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001662 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001663
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001664 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1665 is_l20 = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001666
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001667 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1668 &next0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001669
John Lo1904c472017-03-10 17:15:22 -05001670 ship_it0:
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001671 b0->error = error_node->errors[error0];
1672
1673 // verify speculative enqueue
Ed Warnickecb9cada2015-12-08 15:45:58 -07001674 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1675 to_next, n_left_to_next,
1676 bi0, next0);
1677 }
1678
1679 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1680 }
1681
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001682 // Increment any remaining batched stats
1683 if (stats_n_packets > 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001684 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001685 vlib_increment_combined_counter
1686 (vnm->interface_main.combined_sw_if_counters
1687 + VNET_INTERFACE_COUNTER_RX,
Damjan Marion586afd72017-04-05 19:18:20 +02001688 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001689 node->runtime_data[0] = stats_sw_if_index;
1690 }
Damjan Marion650223c2018-11-14 16:55:53 +01001691}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001692
Damjan Marion5beecec2018-09-10 13:09:21 +02001693VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1694 vlib_node_runtime_t * node,
Damjan Marion650223c2018-11-14 16:55:53 +01001695 vlib_frame_t * frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001696{
Damjan Marion650223c2018-11-14 16:55:53 +01001697 vnet_main_t *vnm = vnet_get_main ();
Damjan Marion650223c2018-11-14 16:55:53 +01001698 u32 *from = vlib_frame_vector_args (frame);
1699 u32 n_packets = frame->n_vectors;
1700
1701 ethernet_input_trace (vm, node, frame);
1702
1703 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1704 {
Damjan Marion650223c2018-11-14 16:55:53 +01001705 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
Damjan Marion650223c2018-11-14 16:55:53 +01001706 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001707 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1708 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
Damjan Marion650223c2018-11-14 16:55:53 +01001709 }
Damjan Marion8d6f34e2018-11-25 21:19:13 +01001710 else
1711 ethernet_input_inline (vm, node, from, n_packets,
1712 ETHERNET_INPUT_VARIANT_ETHERNET);
Damjan Marion650223c2018-11-14 16:55:53 +01001713 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001714}
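/*
 * Illustrative sketch (not part of the original file): a device input node
 * that knows every buffer in a frame came from the same interface can pass
 * that hint to ethernet-input through the frame scalar area, roughly:
 *
 *   ethernet_input_frame_t *ef = vlib_frame_scalar_args (f);
 *   ef->sw_if_index = rxq_sw_if_index;     // hypothetical per-queue fields
 *   ef->hw_if_index = rxq_hw_if_index;
 *   f->flags |= ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
 *   if (hw_verified_all_ip4_checksums)     // hypothetical driver state
 *     f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
 *
 * which lets this node take the eth_input_single_int () path above instead
 * of the generic ethernet_input_inline () path. How the driver obtains the
 * vlib_frame_t pointer is device-specific and omitted here.
 */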
Ed Warnickecb9cada2015-12-08 15:45:58 -07001715
Damjan Marion5beecec2018-09-10 13:09:21 +02001716VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1717 vlib_node_runtime_t * node,
1718 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001719{
Damjan Marion650223c2018-11-14 16:55:53 +01001720 u32 *from = vlib_frame_vector_args (from_frame);
1721 u32 n_packets = from_frame->n_vectors;
1722 ethernet_input_trace (vm, node, from_frame);
1723 ethernet_input_inline (vm, node, from, n_packets,
1724 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1725 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001726}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001727
Damjan Marion5beecec2018-09-10 13:09:21 +02001728VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1729 vlib_node_runtime_t * node,
1730 vlib_frame_t * from_frame)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001731{
Damjan Marion650223c2018-11-14 16:55:53 +01001732 u32 *from = vlib_frame_vector_args (from_frame);
1733 u32 n_packets = from_frame->n_vectors;
1734 ethernet_input_trace (vm, node, from_frame);
1735 ethernet_input_inline (vm, node, from, n_packets,
1736 ETHERNET_INPUT_VARIANT_NOT_L2);
1737 return n_packets;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001738}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001739
1740
1741// Return the subinterface config struct for the given sw_if_index
1742// Also return via parameter the appropriate match flags for the
1743// configured number of tags.
1744// On error (unsupported or not ethernet) return 0.
1745static subint_config_t *
1746ethernet_sw_interface_get_config (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001747 u32 sw_if_index,
1748 u32 * flags, u32 * unsupported)
1749{
1750 ethernet_main_t *em = &ethernet_main;
1751 vnet_hw_interface_t *hi;
1752 vnet_sw_interface_t *si;
1753 main_intf_t *main_intf;
1754 vlan_table_t *vlan_table;
1755 qinq_table_t *qinq_table;
1756 subint_config_t *subint = 0;
1757
Ed Warnickecb9cada2015-12-08 15:45:58 -07001758 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1759
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001760 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1761 {
1762 *unsupported = 0;
1763 goto done; // non-ethernet interface
1764 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001765
1766 // ensure there's an entry for the main intf (shouldn't really be necessary)
1767 vec_validate (em->main_intfs, hi->hw_if_index);
1768 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1769
1770 // Locate the subint for the given ethernet config
1771 si = vnet_get_sw_interface (vnm, sw_if_index);
1772
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001773 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1774 {
1775 p2p_ethernet_main_t *p2pm = &p2p_main;
1776 u32 p2pe_sw_if_index =
1777 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1778 if (p2pe_sw_if_index == ~0)
1779 {
1780 pool_get (p2pm->p2p_subif_pool, subint);
1781 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1782 }
1783 else
1784 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1785 *flags = SUBINT_CONFIG_P2P;
1786 }
Neale Ranns17ff3c12018-07-04 10:24:24 -07001787 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1788 {
1789 pipe_t *pipe;
1790
1791 pipe = pipe_get (sw_if_index);
1792 subint = &pipe->subint;
1793 *flags = SUBINT_CONFIG_P2P;
1794 }
Pavel Kotucek15ac81c2017-06-20 14:00:26 +02001795 else if (si->sub.eth.flags.default_sub)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001796 {
1797 subint = &main_intf->default_subint;
Mike Bly88076742018-09-24 10:13:06 -07001798 *flags = SUBINT_CONFIG_MATCH_1_TAG |
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001799 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1800 }
1801 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1802 {
1803 // if no flags are set then this is a main interface
1804 // so treat as untagged
1805 subint = &main_intf->untagged_subint;
1806 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1807 }
1808 else
1809 {
1810 // one or two tags
1811 // first get the vlan table
1812 if (si->sub.eth.flags.dot1ad)
1813 {
1814 if (main_intf->dot1ad_vlans == 0)
1815 {
1816 // Allocate a vlan table from the pool
1817 pool_get (em->vlan_pool, vlan_table);
1818 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1819 }
1820 else
1821 {
1822 // Get ptr to existing vlan table
1823 vlan_table =
1824 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1825 }
1826 }
1827 else
1828 { // dot1q
1829 if (main_intf->dot1q_vlans == 0)
1830 {
1831 // Allocate a vlan table from the pool
1832 pool_get (em->vlan_pool, vlan_table);
1833 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1834 }
1835 else
1836 {
1837 // Get ptr to existing vlan table
1838 vlan_table =
1839 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
1840 }
1841 }
1842
1843 if (si->sub.eth.flags.one_tag)
1844 {
1845 *flags = si->sub.eth.flags.exact_match ?
1846 SUBINT_CONFIG_MATCH_1_TAG :
1847 (SUBINT_CONFIG_MATCH_1_TAG |
1848 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1849
1850 if (si->sub.eth.flags.outer_vlan_id_any)
1851 {
1852 // not implemented yet
1853 *unsupported = 1;
1854 goto done;
1855 }
1856 else
1857 {
1858 // a single vlan, a common case
1859 subint =
1860 &vlan_table->vlans[si->sub.eth.
1861 outer_vlan_id].single_tag_subint;
1862 }
1863
1864 }
1865 else
1866 {
1867 // Two tags
1868 *flags = si->sub.eth.flags.exact_match ?
1869 SUBINT_CONFIG_MATCH_2_TAG :
1870 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1871
1872 if (si->sub.eth.flags.outer_vlan_id_any
1873 && si->sub.eth.flags.inner_vlan_id_any)
1874 {
1875 // not implemented yet
1876 *unsupported = 1;
1877 goto done;
1878 }
1879
1880 if (si->sub.eth.flags.inner_vlan_id_any)
1881 {
1882 // a specific outer and "any" inner
1883 // don't need a qinq table for this
1884 subint =
1885 &vlan_table->vlans[si->sub.eth.
1886 outer_vlan_id].inner_any_subint;
1887 if (si->sub.eth.flags.exact_match)
1888 {
1889 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1890 }
1891 else
1892 {
1893 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1894 SUBINT_CONFIG_MATCH_3_TAG;
1895 }
1896 }
1897 else
1898 {
1899		      // a specific outer + specific inner vlan id, a common case
1900
1901 // get the qinq table
1902 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1903 {
1904 // Allocate a qinq table from the pool
1905 pool_get (em->qinq_pool, qinq_table);
1906 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1907 qinq_table - em->qinq_pool;
1908 }
1909 else
1910 {
1911 // Get ptr to existing qinq table
1912 qinq_table =
1913 vec_elt_at_index (em->qinq_pool,
1914 vlan_table->vlans[si->sub.
1915 eth.outer_vlan_id].
1916 qinqs);
1917 }
1918 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1919 }
1920 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001921 }
1922
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001923done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001924 return subint;
1925}
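/*
 * Worked example (informal): for a dot1q subinterface configured with a
 * single outer VLAN and exact-match, the function above returns that VLAN's
 * single_tag_subint entry with *flags = SUBINT_CONFIG_MATCH_1_TAG; without
 * exact-match the same subinterface also accepts extra inner tags, i.e.
 * *flags = SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG |
 * SUBINT_CONFIG_MATCH_3_TAG.
 */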
1926
Damjan Marion5beecec2018-09-10 13:09:21 +02001927static clib_error_t *
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001928ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001929{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001930 subint_config_t *subint;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001931 u32 dummy_flags;
1932 u32 dummy_unsup;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001933 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001934
1935 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001936 subint =
1937 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1938 &dummy_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001939
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001940 if (subint == 0)
1941 {
1942 // not implemented yet or not ethernet
1943 goto done;
1944 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001945
1946 subint->sw_if_index =
1947 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1948
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001949done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07001950 return error;
1951}
1952
1953VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1954
1955
Damjan Marion5beecec2018-09-10 13:09:21 +02001956#ifndef CLIB_MARCH_VARIANT
Ed Warnickecb9cada2015-12-08 15:45:58 -07001957// Set the L2/L3 mode for the subinterface
1958void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001959ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001960{
1961 subint_config_t *subint;
1962 u32 dummy_flags;
1963 u32 dummy_unsup;
1964 int is_port;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001965 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001966
1967 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1968
1969 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001970 subint =
1971 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1972 &dummy_unsup);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001973
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001974 if (subint == 0)
1975 {
1976 // unimplemented or not ethernet
1977 goto done;
1978 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001979
1980 // Double check that the config we found is for our interface (or the interface is down)
1981 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1982
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07001983 if (l2)
1984 {
1985 subint->flags |= SUBINT_CONFIG_L2;
1986 if (is_port)
1987 subint->flags |=
1988 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1989 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1990 }
1991 else
1992 {
1993 subint->flags &= ~SUBINT_CONFIG_L2;
1994 if (is_port)
1995 subint->flags &=
1996 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1997 | SUBINT_CONFIG_MATCH_3_TAG);
1998 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001999
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002000done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002001 return;
2002}
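/*
 * Usage sketch (hedged): L2 configuration code is expected to call this
 * when a port or subinterface moves between L2 and L3 forwarding, e.g.
 *
 *   ethernet_sw_interface_set_l2_mode (vnm, sw_if_index, 1);  // enter L2 mode
 *   ethernet_sw_interface_set_l2_mode (vnm, sw_if_index, 0);  // back to L3
 *
 * The actual callers (bridge / xconnect setup) live outside this file.
 */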
2003
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002004/*
2005 * Set the L2/L3 mode for the subinterface regardless of port
2006 */
2007void
2008ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002009 u32 sw_if_index, u32 l2)
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002010{
2011 subint_config_t *subint;
2012 u32 dummy_flags;
2013 u32 dummy_unsup;
2014
2015 /* Find the config for this subinterface */
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002016 subint =
2017 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
2018 &dummy_unsup);
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002019
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002020 if (subint == 0)
2021 {
2022 /* unimplemented or not ethernet */
2023 goto done;
2024 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002025
2026 /*
2027 * Double check that the config we found is for our interface (or the
2028 * interface is down)
2029 */
2030 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
2031
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002032 if (l2)
2033 {
2034 subint->flags |= SUBINT_CONFIG_L2;
2035 }
2036 else
2037 {
2038 subint->flags &= ~SUBINT_CONFIG_L2;
2039 }
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002040
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002041done:
Christian Dechamplain (cdechamp)07aecbb2016-04-05 10:40:38 -04002042 return;
2043}
Damjan Marion5beecec2018-09-10 13:09:21 +02002044#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07002045
2046static clib_error_t *
2047ethernet_sw_interface_add_del (vnet_main_t * vnm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002048 u32 sw_if_index, u32 is_create)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002049{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002050 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002051 subint_config_t *subint;
2052 u32 match_flags;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002053 u32 unsupported = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002054
2055 // Find the config for this subinterface
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002056 subint =
2057 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
2058 &unsupported);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002059
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002060 if (subint == 0)
2061 {
2062 // not implemented yet or not ethernet
2063 if (unsupported)
2064 {
Damjan Marion607de1a2016-08-16 22:53:54 +02002065 // this is the NYI case
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002066 error = clib_error_return (0, "not implemented yet");
2067 }
2068 goto done;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002069 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002070
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002071 if (!is_create)
2072 {
2073 subint->flags = 0;
2074 return error;
2075 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002076
2077 // Initialize the subint
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002078 if (subint->flags & SUBINT_CONFIG_VALID)
2079 {
2080 // Error vlan already in use
2081 error = clib_error_return (0, "vlan is already in use");
2082 }
2083 else
2084 {
Neale Ranns17ff3c12018-07-04 10:24:24 -07002085 // Note that config is L3 by default
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002086 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2087 subint->sw_if_index = ~0; // because interfaces are initially down
2088 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002089
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002090done:
Ed Warnickecb9cada2015-12-08 15:45:58 -07002091 return error;
2092}
2093
2094VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2095
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002096static char *ethernet_error_strings[] = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002097#define ethernet_error(n,c,s) s,
2098#include "error.def"
2099#undef ethernet_error
2100};
2101
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002102/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002103VLIB_REGISTER_NODE (ethernet_input_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002104 .name = "ethernet-input",
2105 /* Takes a vector of packets. */
2106 .vector_size = sizeof (u32),
Damjan Marion650223c2018-11-14 16:55:53 +01002107 .scalar_size = sizeof (ethernet_input_frame_t),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002108 .n_errors = ETHERNET_N_ERROR,
2109 .error_strings = ethernet_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002110 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2111 .next_nodes = {
2112#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2113 foreach_ethernet_input_next
2114#undef _
2115 },
Ed Warnickecb9cada2015-12-08 15:45:58 -07002116 .format_buffer = format_ethernet_header_with_length,
2117 .format_trace = format_ethernet_input_trace,
2118 .unformat_buffer = unformat_ethernet_header,
2119};
2120
Damjan Marion5beecec2018-09-10 13:09:21 +02002121VLIB_REGISTER_NODE (ethernet_input_type_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002122 .name = "ethernet-input-type",
2123 /* Takes a vector of packets. */
2124 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002125 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2126 .next_nodes = {
2127#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2128 foreach_ethernet_input_next
2129#undef _
2130 },
2131};
2132
Damjan Marion5beecec2018-09-10 13:09:21 +02002133VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
Ed Warnickecb9cada2015-12-08 15:45:58 -07002134 .name = "ethernet-input-not-l2",
2135 /* Takes a vector of packets. */
2136 .vector_size = sizeof (u32),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002137 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2138 .next_nodes = {
2139#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2140 foreach_ethernet_input_next
2141#undef _
2142 },
2143};
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002144/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002145
Damjan Marion5beecec2018-09-10 13:09:21 +02002146#ifndef CLIB_MARCH_VARIANT
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002147void
2148ethernet_set_rx_redirect (vnet_main_t * vnm,
2149 vnet_hw_interface_t * hi, u32 enable)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002150{
2151  // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
2152 // don't go directly to ip4-input)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002153 vnet_hw_interface_rx_redirect_to_node
2154 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002155}
2156
2157
2158/*
2159 * Initialization and registration for the next_by_ethernet structure
2160 */
2161
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002162clib_error_t *
2163next_by_ethertype_init (next_by_ethertype_t * l3_next)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002164{
2165 l3_next->input_next_by_type = sparse_vec_new
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002166 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
Ed Warnickecb9cada2015-12-08 15:45:58 -07002167 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2168
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002169 vec_validate (l3_next->sparse_index_by_input_next_index,
2170 ETHERNET_INPUT_NEXT_DROP);
2171 vec_validate (l3_next->sparse_index_by_input_next_index,
2172 ETHERNET_INPUT_NEXT_PUNT);
2173 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2174 SPARSE_VEC_INVALID_INDEX;
2175 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2176 SPARSE_VEC_INVALID_INDEX;
2177
Damjan Marion607de1a2016-08-16 22:53:54 +02002178 /*
2179 * Make sure we don't wipe out an ethernet registration by mistake
Dave Barach1f49ed62016-02-24 11:29:06 -05002180 * Can happen if init function ordering constraints are missing.
2181 */
2182 if (CLIB_DEBUG > 0)
2183 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002184 ethernet_main_t *em = &ethernet_main;
2185 ASSERT (em->next_by_ethertype_register_called == 0);
Dave Barach1f49ed62016-02-24 11:29:06 -05002186 }
2187
Ed Warnickecb9cada2015-12-08 15:45:58 -07002188 return 0;
2189}
2190
2191// Add an ethertype -> next index mapping to the structure
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002192clib_error_t *
2193next_by_ethertype_register (next_by_ethertype_t * l3_next,
2194 u32 ethertype, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002195{
2196 u32 i;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002197 u16 *n;
2198 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002199
Dave Barach1f49ed62016-02-24 11:29:06 -05002200 if (CLIB_DEBUG > 0)
2201 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002202 ethernet_main_t *em = &ethernet_main;
Dave Barach1f49ed62016-02-24 11:29:06 -05002203 em->next_by_ethertype_register_called = 1;
2204 }
2205
Ed Warnickecb9cada2015-12-08 15:45:58 -07002206 /* Setup ethernet type -> next index sparse vector mapping. */
2207 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2208 n[0] = next_index;
2209
2210 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2211 is updated. */
2212 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2213 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002214 l3_next->
2215 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002216
2217  // do not allow the cached next indices to be updated if L3
2218 // redirect is enabled, as it will have overwritten them
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002219 if (!em->redirect_l3)
2220 {
2221 // Cache common ethertypes directly
2222 if (ethertype == ETHERNET_TYPE_IP4)
2223 {
2224 l3_next->input_next_ip4 = next_index;
2225 }
2226 else if (ethertype == ETHERNET_TYPE_IP6)
2227 {
2228 l3_next->input_next_ip6 = next_index;
2229 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002230 else if (ethertype == ETHERNET_TYPE_MPLS)
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002231 {
2232 l3_next->input_next_mpls = next_index;
2233 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002234 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002235 return 0;
2236}
2237
Dave Barachf8d50682019-05-14 18:01:44 -04002238void
2239ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002240{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002241 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2242 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002243
2244 ethernet_setup_node (vm, ethernet_input_node.index);
2245 ethernet_setup_node (vm, ethernet_input_type_node.index);
2246 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2247
2248 next_by_ethertype_init (&em->l3_next);
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002249
Ed Warnickecb9cada2015-12-08 15:45:58 -07002250 // Initialize pools and vector for vlan parsing
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002251 vec_validate (em->main_intfs, 10); // 10 main interfaces
2252 pool_alloc (em->vlan_pool, 10);
2253 pool_alloc (em->qinq_pool, 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002254
2255 // The first vlan pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002256 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002257 // The first qinq pool will always be reserved for an invalid table
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002258 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
Ed Warnickecb9cada2015-12-08 15:45:58 -07002259}
2260
Ed Warnickecb9cada2015-12-08 15:45:58 -07002261void
2262ethernet_register_input_type (vlib_main_t * vm,
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002263 ethernet_type_t type, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002264{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002265 ethernet_main_t *em = &ethernet_main;
2266 ethernet_type_info_t *ti;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002267 u32 i;
2268
2269 {
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002270 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002271 if (error)
2272 clib_error_report (error);
2273 }
2274
2275 ti = ethernet_get_type_info (em, type);
Dave Barach4bda2d92019-07-03 15:21:50 -04002276 if (ti == 0)
2277 {
2278 clib_warning ("type_info NULL for type %d", type);
2279 return;
2280 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002281 ti->node_index = node_index;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002282 ti->next_index = vlib_node_add_next (vm,
2283 ethernet_input_node.index, node_index);
2284 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002285 ASSERT (i == ti->next_index);
2286
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002287 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002288 ASSERT (i == ti->next_index);
2289
2290 // Add the L3 node for this ethertype to the next nodes structure
2291 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2292
2293 // Call the registration functions for other nodes that want a mapping
2294 l2bvi_register_input_type (vm, type, node_index);
2295}
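/*
 * Usage sketch: an L3 protocol typically registers its ethertype from its
 * own init function so ethernet-input can dispatch to it directly, e.g.
 *
 *   ethernet_register_input_type (vm, ETHERNET_TYPE_ARP, arp_input_node.index);
 *
 * ETHERNET_TYPE_ARP and arp_input_node are defined elsewhere in the tree and
 * are shown here only as an example.
 */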
2296
2297void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002298ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002299{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002300 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002301 u32 i;
2302
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002303 em->l2_next =
2304 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002305
Damjan Marion607de1a2016-08-16 22:53:54 +02002306 /*
Ed Warnickecb9cada2015-12-08 15:45:58 -07002307 * Even if we never use these arcs, we have to align the next indices...
2308 */
2309 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2310
2311 ASSERT (i == em->l2_next);
2312
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002313 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002314 ASSERT (i == em->l2_next);
2315}
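/*
 * Usage sketch (node name assumed): the L2 input feature registers itself
 * once at init so that em->l2_next points at it, for example:
 *
 *   ethernet_register_l2_input (vm, l2input_node.index);
 */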
2316
2317// Register a next node for L3 redirect, and enable L3 redirect
2318void
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002319ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002320{
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002321 ethernet_main_t *em = &ethernet_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002322 u32 i;
2323
2324 em->redirect_l3 = 1;
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002325 em->redirect_l3_next = vlib_node_add_next (vm,
2326 ethernet_input_node.index,
2327 node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002328 /*
2329 * Change the cached next nodes to the redirect node
2330 */
2331 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2332 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2333 em->l3_next.input_next_mpls = em->redirect_l3_next;
2334
2335 /*
2336 * Even if we never use these arcs, we have to align the next indices...
2337 */
2338 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2339
2340 ASSERT (i == em->redirect_l3_next);
jerryianff82ed62016-12-05 17:13:00 +08002341
2342 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2343
2344 ASSERT (i == em->redirect_l3_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002345}
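/*
 * Usage sketch (hypothetical node name): a feature that needs to see all L3
 * traffic before the normal ip4/ip6/mpls input nodes would call, from its
 * enable path:
 *
 *   ethernet_register_l3_redirect (vm, my_l3_intercept_node.index);
 *
 * after which the cached ip4/ip6/mpls next indices above resolve to that
 * node instead of the usual L3 input arcs.
 */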
Damjan Marion5beecec2018-09-10 13:09:21 +02002346#endif
Keith Burns (alagalah)e70dcc82016-08-15 18:33:19 -07002347
2348/*
2349 * fd.io coding-style-patch-verification: ON
2350 *
2351 * Local Variables:
2352 * eval: (c-set-style "gnu")
2353 * End:
2354 */