/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vxlan/vxlan.h>

#ifndef CLIB_MARCH_VARIANT
vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;
#endif

typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 *
format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
                   t->vni);
  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
                 t->tunnel_index, t->vni, t->next_index, t->error);
}

typedef vxlan4_tunnel_key_t last_tunnel_cache4;

static const vxlan_decap_info_t decap_not_found = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_NO_SUCH_TUNNEL
};

static const vxlan_decap_info_t decap_bad_flags = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_BAD_FLAGS
};

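/*
 * IPv4 tunnel lookup: check the last-tunnel cache (valid for the current
 * frame) first, then the bihash keyed on (dst, src, fib index, VNI). If the
 * unicast lookup misses and the destination is multicast, a second lookup by
 * group address finds the mcast tunnel, and a third lookup, keyed on the
 * mcast tunnel's local (source) address and the packet's source, yields the
 * unicast tunnel used for decap. Multicast hits do not update the cache.
 */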
always_inline vxlan_decap_info_t
vxlan4_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache4 * cache,
                    u32 fib_index, ip4_header_t * ip4_0,
                    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure the VXLAN tunnel exists according to packet S/D IP, VRF, and VNI */
  u32 dst = ip4_0->dst_address.as_u32;
  u32 src = ip4_0->src_address.as_u32;
  vxlan4_tunnel_key_t key4 = {
    .key[0] = ((u64) dst << 32) | src,
    .key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved,
  };

  if (PREDICT_TRUE
      (key4.key[0] == cache->key[0] && key4.key[1] == cache->key[1]))
    {
      /* cache hit */
      vxlan_decap_info_t di = {.as_u64 = cache->value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_TRUE (rv == 0))
    {
      *cache = key4;
      vxlan_decap_info_t di = {.as_u64 = key4.value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  /* try multicast */
  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
    return decap_not_found;

  /* search for mcast decap info by mcast address */
  key4.key[0] = dst;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (rv != 0)
    return decap_not_found;

  /* search for unicast tunnel using the mcast tunnel local (src) ip */
  vxlan_decap_info_t mdi = {.as_u64 = key4.value };
  key4.key[0] = ((u64) mdi.local_ip.as_u32 << 32) | src;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_FALSE (rv != 0))
    return decap_not_found;

  /* mcast traffic does not update the cache */
  *stats_sw_if_index = mdi.sw_if_index;
  vxlan_decap_info_t di = {.as_u64 = key4.value };
  return di;
}

typedef vxlan6_tunnel_key_t last_tunnel_cache6;

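/*
 * IPv6 tunnel lookup: the bihash key is (src address, fib index, VNI) and
 * the last hit is cached for the current frame. The matched tunnel's source
 * is then compared with the packet's destination; on a mismatch, a multicast
 * destination triggers a second lookup by (dst address, fib index, VNI) and
 * the resulting mcast tunnel is used only for the rx counter.
 */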
always_inline vxlan_decap_info_t
vxlan6_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache6 * cache,
                    u32 fib_index, ip6_header_t * ip6_0,
                    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure the VXLAN tunnel exists according to packet SIP and VNI */
  vxlan6_tunnel_key_t key6 = {
    .key[0] = ip6_0->src_address.as_u64[0],
    .key[1] = ip6_0->src_address.as_u64[1],
    .key[2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved,
  };

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      int rv =
        clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
        return decap_not_found;

      *cache = key6;
    }
  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    *stats_sw_if_index = t0->sw_if_index;
  else
    {
      /* try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
        return decap_not_found;

      /* Make sure the mcast VXLAN tunnel exists by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      int rv =
        clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
        return decap_not_found;

      vxlan_tunnel_t *mcast_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
      *stats_sw_if_index = mcast_t0->sw_if_index;
    }

  vxlan_decap_info_t di = {
    .sw_if_index = t0->sw_if_index,
    .next_index = t0->decap_next_index,
  };
  return di;
}

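/*
 * Shared decap loop for the vxlan4-input and vxlan6-input nodes. Packets
 * arrive from udp-local (or the ip-vxlan-bypass nodes below) with
 * current_data at the VXLAN header; the header is popped, the tunnel is
 * resolved, VLIB_RX is rewritten to the tunnel interface so L2 learning sees
 * the tunnel, and the packet continues to the tunnel's decap next node
 * (normally l2-input) or to error-drop.
 */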
always_inline uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_dropped = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  u32 stats_if0 = ~0, stats_if1 = ~0;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
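  /* Dual loop: decapsulate two packets per iteration while prefetching the
     next two buffers. */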
  while (n_left_from >= 4)
    {
      /* Prefetch next iteration. */
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      void *cur1 = vlib_buffer_get_current (b[1]);
      vxlan_header_t *vxlan0 = cur0;
      vxlan_header_t *vxlan1 = cur1;

      ip4_header_t *ip4_0, *ip4_1;
      ip6_header_t *ip6_0, *ip6_1;
      if (is_ip4)
        {
          ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
          ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
        }
      else
        {
          ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
          ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
        }

      /* pop vxlan */
      vlib_buffer_advance (b[0], sizeof *vxlan0);
      vlib_buffer_advance (b[1], sizeof *vxlan1);

      u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);
      u32 fi1 = vlib_buffer_get_ip_fib_index (b[1], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
        vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
        vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
      vxlan_decap_info_t di1 = is_ip4 ?
        vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_if1) :
        vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_if1);

      /* Prefetch next iteration. */
      CLIB_PREFETCH (b[2]->data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (b[3]->data, CLIB_CACHE_LINE_BYTES, LOAD);

      u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
      u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);

      next[0] = di0.next_index;
      next[1] = di1.next_index;

      u8 any_error = di0.error | di1.error;
      if (PREDICT_TRUE (any_error == 0))
        {
          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b[0]);
          vnet_update_l2_len (b[1]);
          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
          vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
          vlib_increment_combined_counter (rx_counter, thread_index,
                                           stats_if0, 1, len0);
          vlib_increment_combined_counter (rx_counter, thread_index,
                                           stats_if1, 1, len1);
        }
      else
        {
          if (di0.error == 0)
            {
              vnet_update_l2_len (b[0]);
              vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
              vlib_increment_combined_counter (rx_counter, thread_index,
                                               stats_if0, 1, len0);
            }
          else
            {
              b[0]->error = node->errors[di0.error];
              pkts_dropped++;
            }

          if (di1.error == 0)
            {
              vnet_update_l2_len (b[1]);
              vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
              vlib_increment_combined_counter (rx_counter, thread_index,
                                               stats_if1, 1, len1);
            }
          else
            {
              b[1]->error = node->errors[di1.error];
              pkts_dropped++;
            }
        }

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->next_index = next[0];
          tr->error = di0.error;
          tr->tunnel_index = di0.sw_if_index == ~0 ?
            ~0 : vxm->tunnel_index_by_sw_if_index[di0.sw_if_index];
          tr->vni = vnet_get_vni (vxlan0);
        }
      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr =
            vlib_add_trace (vm, node, b[1], sizeof (*tr));
          tr->next_index = next[1];
          tr->error = di1.error;
          tr->tunnel_index = di1.sw_if_index == ~0 ?
            ~0 : vxm->tunnel_index_by_sw_if_index[di1.sw_if_index];
          tr->vni = vnet_get_vni (vxlan1);
        }
      b += 2;
      next += 2;
      n_left_from -= 2;
    }

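  /* Single loop: same processing for the remaining packets, one at a time. */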
  while (n_left_from > 0)
    {
      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      vxlan_header_t *vxlan0 = cur0;
      ip4_header_t *ip4_0;
      ip6_header_t *ip6_0;
      if (is_ip4)
        ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
      else
        ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

      /* pop vxlan */
      vlib_buffer_advance (b[0], sizeof (*vxlan0));

      u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
        vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
        vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);

      uword len0 = vlib_buffer_length_in_chain (vm, b[0]);

      next[0] = di0.next_index;

      /* Validate VXLAN tunnel encap-fib index against packet */
      if (di0.error == 0)
        {
          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b[0]);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;

          vlib_increment_combined_counter (rx_counter, thread_index,
                                           stats_if0, 1, len0);
        }
      else
        {
          b[0]->error = node->errors[di0.error];
          pkts_dropped++;
        }

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr
            = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->next_index = next[0];
          tr->error = di0.error;
          tr->tunnel_index = di0.sw_if_index == ~0 ?
            ~0 : vxm->tunnel_index_by_sw_if_index[di0.sw_if_index];
          tr->vni = vnet_get_vni (vxlan0);
        }
      b += 1;
      next += 1;
      n_left_from -= 1;
    }
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
  /* Do we still need this now that tunnel tx stats are kept? */
  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
                               from_frame->n_vectors - pkts_dropped);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char *vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_input_node) =
{
  .name = "vxlan4-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .format_trace = format_vxlan_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
};

VLIB_REGISTER_NODE (vxlan6_input_node) =
{
  .name = "vxlan6-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
  .format_trace = format_vxlan_rx_trace,
};
/* *INDENT-ON* */

typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;

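/*
 * The ip[46]-vxlan-bypass nodes run as IP input features. They peek at the
 * IP and UDP headers and, when a packet is UDP to the VXLAN port with a
 * destination matching a local VTEP, validate the UDP length and checksum
 * (normally done by ip[46]-local / udp-local) and hand the packet straight
 * to vxlan[46]-input. Everything else continues on the regular feature arc
 * via vnet_feature_next.
 */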
always_inline uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;       /* last IPv4 address / fib index
                                   matching a local VTEP address */
  vtep6_key_t last_vtep6;       /* last IPv6 address / fib index
                                   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
#endif

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          ip4_header_t *ip40, *ip41;
          ip6_header_t *ip60, *ip61;
          udp_header_t *udp0, *udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit0;         /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0;     /* no local VTEP for VXLAN packet */
            }
          else
            {
              if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0;     /* no local VTEP for VXLAN packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if (is_ip4)
                flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
              else
                flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
              good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit1;         /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1;     /* no local VTEP for VXLAN packet */
            }
          else
            {
              if (!vtep6_check (&vxm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1;     /* no local VTEP for VXLAN packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if (is_ip4)
                flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
              else
                flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
              good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b1,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b1,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          udp_header_t *udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit;          /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit;      /* no local VTEP for VXLAN packet */
            }
          else
            {
              if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
                goto exit;      /* no local VTEP for VXLAN packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if (is_ip4)
                flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
              else
                flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
              good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_vxlan_bypass_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) =
{
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
static clib_error_t *
ip4_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);

VLIB_NODE_FN (ip6_vxlan_bypass_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) =
{
  .name = "ip6-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
static clib_error_t *
ip6_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);

#define foreach_vxlan_flow_input_next	\
_(DROP, "error-drop")			\
_(L2_INPUT, "l2-input")

typedef enum
{
#define _(s,n) VXLAN_FLOW_NEXT_##s,
  foreach_vxlan_flow_input_next
#undef _
  VXLAN_FLOW_N_NEXT,
} vxlan_flow_input_next_t;

#define foreach_vxlan_flow_error			\
  _(NONE, "no error")					\
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")		\
  _(IP_HEADER_ERROR, "Rx ip header errors")		\
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")	\
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) VXLAN_FLOW_ERROR_##f,
  foreach_vxlan_flow_error
#undef _
  VXLAN_FLOW_N_ERROR,
} vxlan_flow_error_t;

static char *vxlan_flow_error_strings[] = {
#define _(n,s) s,
  foreach_vxlan_flow_error
#undef _
};

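/*
 * Flow-offload RX path: when receive flow offload is enabled on a tunnel,
 * the NIC steers matching packets directly to vxlan-flow-input and marks
 * them with the tunnel's flow id, so no software tunnel lookup is needed
 * here. Since ip4-input and udp-local are bypassed, the helpers below
 * re-check the outer IPv4 header, IP/UDP lengths, and UDP checksum.
 */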
static_always_inline u8
vxlan_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  u32 flags = b->flags;
  enum
  { offset =
      sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
  };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}

static_always_inline u8
vxlan_check_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  udp_header_t *udp = &hdr->udp;
  /* Don't verify UDP checksum for packets with explicit zero checksum. */
  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
    udp->checksum == 0;

  return !good_csum;
}

static_always_inline u8
vxlan_check_ip (vlib_buffer_t * b, u16 payload_len)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 expected = payload_len + sizeof *hdr;
  return ip_len > expected || hdr->ip4.ttl == 0
    || hdr->ip4.ip_version_and_header_length != 0x45;
}

static_always_inline u8
vxlan_check_ip_udp_len (vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
  return udp_len > ip_len;
}

static_always_inline u8
vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = VXLAN_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}

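/*
 * vxlan-flow-input: quad loop over hardware-steered packets. The outer
 * ip4/udp/vxlan headers are skipped in a single advance, the checks above
 * are applied, the tunnel is taken from b->flow_id, and the packet is
 * counted against the tunnel interface before going to l2-input (or
 * error-drop on a failed check).
 */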
VLIB_NODE_FN (vxlan4_flow_input_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * f)
{
  enum
  { payload_offset = sizeof (ip4_vxlan_header_t) };

  vxlan_main_t *vxm = &vxlan_main;
  vnet_interface_main_t *im = &vnet_main.interface_main;
  vlib_combined_counter_main_t *rx_counter[VXLAN_FLOW_N_NEXT] = {
    [VXLAN_FLOW_NEXT_DROP] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP,
    [VXLAN_FLOW_NEXT_L2_INPUT] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
  };
  u32 thread_index = vlib_get_thread_index ();

  u32 *from = vlib_frame_vector_args (f);
  u32 n_left_from = f->n_vectors;
  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;

  while (n_left_from > 0)
    {
      u32 n_left_to_next, *to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 3 && n_left_to_next > 3)
        {
          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          u32 bi2 = to_next[2] = from[2];
          u32 bi3 = to_next[3] = from[3];
          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
          vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);

          vlib_buffer_advance (b0, payload_offset);
          vlib_buffer_advance (b1, payload_offset);
          vlib_buffer_advance (b2, payload_offset);
          vlib_buffer_advance (b3, payload_offset);

          u16 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 len2 = vlib_buffer_length_in_chain (vm, b2);
          u16 len3 = vlib_buffer_length_in_chain (vm, b3);

          u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 =
            VXLAN_FLOW_NEXT_L2_INPUT, next2 =
            VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;

          u8 ip_err0 = vxlan_check_ip (b0, len0);
          u8 ip_err1 = vxlan_check_ip (b1, len1);
          u8 ip_err2 = vxlan_check_ip (b2, len2);
          u8 ip_err3 = vxlan_check_ip (b3, len3);
          u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;

          u8 udp_err0 = vxlan_check_ip_udp_len (b0);
          u8 udp_err1 = vxlan_check_ip_udp_len (b1);
          u8 udp_err2 = vxlan_check_ip_udp_len (b2);
          u8 udp_err3 = vxlan_check_ip_udp_len (b3);
          u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;

          u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
          u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
          u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
          u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
          u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;

          if (PREDICT_FALSE (csum_err))
            {
              if (csum_err0)
                csum_err0 = !vxlan_validate_udp_csum (vm, b0);
              if (csum_err1)
                csum_err1 = !vxlan_validate_udp_csum (vm, b1);
              if (csum_err2)
                csum_err2 = !vxlan_validate_udp_csum (vm, b2);
              if (csum_err3)
                csum_err3 = !vxlan_validate_udp_csum (vm, b3);
              csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
            }

          if (PREDICT_FALSE (ip_err || udp_err || csum_err))
            {
              if (ip_err0 || udp_err0 || csum_err0)
                {
                  next0 = VXLAN_FLOW_NEXT_DROP;
                  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
                  b0->error = node->errors[error0];
                }
              if (ip_err1 || udp_err1 || csum_err1)
                {
                  next1 = VXLAN_FLOW_NEXT_DROP;
                  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
                  b1->error = node->errors[error1];
                }
              if (ip_err2 || udp_err2 || csum_err2)
                {
                  next2 = VXLAN_FLOW_NEXT_DROP;
                  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
                  b2->error = node->errors[error2];
                }
              if (ip_err3 || udp_err3 || csum_err3)
                {
                  next3 = VXLAN_FLOW_NEXT_DROP;
                  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
                  b3->error = node->errors[error3];
                }
            }

          vnet_update_l2_len (b0);
          vnet_update_l2_len (b1);
          vnet_update_l2_len (b2);
          vnet_update_l2_len (b3);

          ASSERT (b0->flow_id != 0);
          ASSERT (b1->flow_id != 0);
          ASSERT (b2->flow_id != 0);
          ASSERT (b3->flow_id != 0);

          u32 t_index0 = b0->flow_id - vxm->flow_id_start;
          u32 t_index1 = b1->flow_id - vxm->flow_id_start;
          u32 t_index2 = b2->flow_id - vxm->flow_id_start;
          u32 t_index3 = b3->flow_id - vxm->flow_id_start;

          vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
          vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
          vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
          vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];

          /* flow id consumed */
          b0->flow_id = 0;
          b1->flow_id = 0;
          b2->flow_id = 0;
          b3->flow_id = 0;

          u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            t0->sw_if_index;
          u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
            t1->sw_if_index;
          u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
            t2->sw_if_index;
          u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
            t3->sw_if_index;

          vlib_increment_combined_counter (rx_counter[next0], thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (rx_counter[next1], thread_index,
                                           sw_if_index1, 1, len1);
          vlib_increment_combined_counter (rx_counter[next2], thread_index,
                                           sw_if_index2, 1, len2);
          vlib_increment_combined_counter (rx_counter[next3], thread_index,
                                           sw_if_index3, 1, len3);

          u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;

          if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof *tr);
                  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
                  tr->next_index = next0;
                  tr->error = error0;
                  tr->tunnel_index = t_index0;
                  tr->vni = t0->vni;
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof *tr);
                  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
                  tr->next_index = next1;
                  tr->error = error1;
                  tr->tunnel_index = t_index1;
                  tr->vni = t1->vni;
                }
              if (b2->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b2, sizeof *tr);
                  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
                  tr->next_index = next2;
                  tr->error = error2;
                  tr->tunnel_index = t_index2;
                  tr->vni = t2->vni;
                }
              if (b3->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b3, sizeof *tr);
                  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
                  tr->next_index = next3;
                  tr->error = error3;
                  tr->tunnel_index = t_index3;
                  tr->vni = t3->vni;
                }
            }
          vlib_validate_buffer_enqueue_x4
            (vm, node, next_index, to_next, n_left_to_next,
             bi0, bi1, bi2, bi3, next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from++;
          n_left_from--;
          to_next++;
          n_left_to_next--;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_advance (b0, payload_offset);

          u16 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;

          u8 ip_err0 = vxlan_check_ip (b0, len0);
          u8 udp_err0 = vxlan_check_ip_udp_len (b0);
          u8 csum_err0 = vxlan_check_udp_csum (vm, b0);

          if (csum_err0)
            csum_err0 = !vxlan_validate_udp_csum (vm, b0);
          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = VXLAN_FLOW_NEXT_DROP;
              u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
              b0->error = node->errors[error0];
            }

          vnet_update_l2_len (b0);

          ASSERT (b0->flow_id != 0);
          u32 t_index0 = b0->flow_id - vxm->flow_id_start;
          vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
          b0->flow_id = 0;

          u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            t0->sw_if_index;
          vlib_increment_combined_counter (rx_counter[next0], thread_index,
                                           sw_if_index0, 1, len0);

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof *tr);
              u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t_index0;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return f->n_vectors;
}

/* *INDENT-OFF* */
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (vxlan4_flow_input_node) = {
  .name = "vxlan-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_vxlan_rx_trace,

  .n_errors = VXLAN_FLOW_N_ERROR,
  .error_strings = vxlan_flow_error_strings,

  .n_next_nodes = VXLAN_FLOW_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
    foreach_vxlan_flow_input_next
#undef _
  },
};
#endif
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */