blob: 927c778b21191e333dd8ccd80b321ebc16b3cf61 [file] [log] [blame]
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001/*
2 * decap.c: vxlan gbp tunnel decap packet processing
3 *
4 * Copyright (c) 2018 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vlib/vlib.h>
Neale Ranns76b56492018-09-28 15:16:14 +000019
Mohsin Kazmi61b94c62018-08-20 18:32:39 +020020#include <vnet/vxlan-gbp/vxlan_gbp.h>
21
/**
 * Per-packet trace record for the vxlan4/6-gbp-input nodes.
 * Filled in when VLIB_BUFFER_IS_TRACED is set; rendered by
 * format_vxlan_gbp_rx_trace().
 */
typedef struct
{
  u32 next_index;		/**< next node index chosen for the packet */
  u32 tunnel_index;		/**< index into vxm->tunnels; ~0 = no tunnel matched */
  u32 error;			/**< node error code (VXLAN_GBP_ERROR_*) */
  u32 vni;			/**< VNI extracted from the VXLAN-GBP header */
  u16 sclass;			/**< source class / group policy id from the header */
  u8 flags;			/**< group-policy flag bits from the header */
} vxlan_gbp_rx_trace_t;
31
32static u8 *
33format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
34{
35 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37 vxlan_gbp_rx_trace_t *t = va_arg (*args, vxlan_gbp_rx_trace_t *);
38
39 if (t->tunnel_index == ~0)
40 return format (s,
41 "VXLAN_GBP decap error - tunnel for vni %d does not exist",
42 t->vni);
43 return format (s,
44 "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
Neale Ranns93cc3ee2018-10-10 07:22:51 -070045 " flags %U next %d error %d",
46 t->tunnel_index, t->vni, t->sclass,
47 format_vxlan_gbp_header_gpflags, t->flags,
48 t->next_index, t->error);
Mohsin Kazmi61b94c62018-08-20 18:32:39 +020049}
50
51always_inline u32
52buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
53{
54 u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
55 if (sw_if_index != (u32) ~ 0)
56 return sw_if_index;
57
58 u32 *fib_index_by_sw_if_index = is_ip4 ?
59 ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
60 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
61
62 return vec_elt (fib_index_by_sw_if_index, sw_if_index);
63}
64
/* One-entry cache of the most recently matched IPv4 tunnel key+value,
 * used to skip the bihash lookup for runs of packets from one tunnel. */
typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;
66
/**
 * Find the vxlan-gbp tunnel matching an IPv4-encapsulated packet.
 *
 * @param vxm        vxlan-gbp main
 * @param cache      per-call-site single-entry key cache (hit avoids bihash)
 * @param fib_index  FIB the packet was received in
 * @param ip4_0      outer IPv4 header of the packet
 * @param vxlan_gbp0 VXLAN-GBP header of the packet
 * @return matching tunnel, or NULL if none exists.
 */
always_inline vxlan_gbp_tunnel_t *
vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
			u32 fib_index, ip4_header_t * ip4_0,
			vxlan_gbp_header_t * vxlan_gbp0)
{
  /*
   * Check unicast first since that's where most of the traffic comes from
   * Make sure VXLAN_GBP tunnel exist according to packet SIP, DIP and VNI
   */
  vxlan4_gbp_tunnel_key_t key4;
  int rv;

  /* key[1] = (fib_index << 32) | VNI; the mask keeps only the 24-bit VNI
   * field of the network-order vni_reserved word. */
  key4.key[1] = (((u64) fib_index << 32) |
		 (vxlan_gbp0->vni_reserved &
		  clib_host_to_net_u32 (0xffffff00)));
  /* key[0] = (DIP << 32) | SIP */
  key4.key[0] =
    (((u64) ip4_0->dst_address.as_u32 << 32) | ip4_0->src_address.as_u32);

  if (PREDICT_FALSE (key4.key[0] != cache->key[0] ||
		     key4.key[1] != cache->key[1]))
    {
      /* Cache miss: consult the bihash; rv == 0 means found. */
      rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
					   &key4);
      if (PREDICT_FALSE (rv == 0))
	{
	  /* key4.value now holds the tunnel pool index; remember it. */
	  *cache = key4;
	  return (pool_elt_at_index (vxm->tunnels, cache->value));
	}
    }
  else
    {
      /* Cache hit: cache->value is the tunnel pool index. */
      return (pool_elt_at_index (vxm->tunnels, cache->value));
    }

  /* No unicast match - try multicast */
  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
    return (NULL);

  /* Multicast key: group DIP only (upper/src half zero), same fib+VNI. */
  key4.key[0] = ip4_0->dst_address.as_u32;
  /* Make sure mcast VXLAN_GBP tunnel exist by packet DIP and VNI */
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4);

  if (PREDICT_FALSE (rv != 0))
    return (NULL);

  /* NOTE(review): the mcast result is intentionally NOT cached. */
  return (pool_elt_at_index (vxm->tunnels, key4.value));
}
114
/* One-entry cache of the most recently matched IPv6 tunnel key+value. */
typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6;
116
/**
 * Find the vxlan-gbp tunnel matching an IPv6-encapsulated packet.
 *
 * Unlike the IPv4 variant, the IPv6 key is built from the packet SOURCE
 * address + fib + VNI; the tunnel's own address is then validated against
 * the packet destination separately.
 *
 * @return matching tunnel, or NULL/0 if none exists.
 */
always_inline vxlan_gbp_tunnel_t *
vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
			u32 fib_index, ip6_header_t * ip6_0,
			vxlan_gbp_header_t * vxlan_gbp0)
{
  /* Make sure VXLAN_GBP tunnel exist according to packet SIP and VNI */
  vxlan6_gbp_tunnel_key_t key6 = {
    .key = {
	    [0] = ip6_0->src_address.as_u64[0],
	    [1] = ip6_0->src_address.as_u64[1],
	    /* (fib_index << 32) | 24-bit VNI from network-order word */
	    [2] = ((((u64) fib_index) << 32) |
		   (vxlan_gbp0->vni_reserved &
		    clib_host_to_net_u32 (0xffffff00))),
	    }
  };
  int rv;

  /* key_compare returns 0 on mismatch here, i.e. this is the cache-miss
   * path. */
  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
					   &key6);
      if (PREDICT_FALSE (rv != 0))
	return NULL;

      /* key6.value now holds the tunnel pool index; cache key+value. */
      *cache = key6;
    }
  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN_GBP tunnel SIP against packet DIP */
  if (PREDICT_FALSE
      (!ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    {
      /* try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
	return 0;

      /* Make sure mcast VXLAN_GBP tunnel exist by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
					   &key6);
      if (PREDICT_FALSE (rv != 0))
	return 0;

      /* NOTE(review): the mcast lookup only validates that a group tunnel
       * exists; the unicast tunnel t0 (matched on SIP) is still returned. */
    }

  return t0;
}
166
Neale Ranns93cc3ee2018-10-10 07:22:51 -0700167always_inline vxlan_gbp_input_next_t
168vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
169{
170 if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
171 return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
172 else
173 {
174 ethernet_header_t *e0;
175 u16 type0;
176
177 e0 = vlib_buffer_get_current (b0);
178 vlib_buffer_advance (b0, sizeof (*e0));
179 type0 = clib_net_to_host_u16 (e0->type);
180 switch (type0)
181 {
182 case ETHERNET_TYPE_IP4:
183 return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
184 case ETHERNET_TYPE_IP6:
185 return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
186 }
187 }
188 return (VXLAN_GBP_INPUT_NEXT_DROP);
189}
190
/**
 * Shared decap worker for the vxlan4/6-gbp-input nodes.
 *
 * For each buffer (dual-loop then single-loop):
 *  - locate the outer IP header behind current_data (udp-input leaves
 *    current_data at the VXLAN-GBP header),
 *  - look up the tunnel by outer addresses + fib + VNI,
 *  - pop the VXLAN-GBP header,
 *  - require both the I and G header flags; otherwise drop (bad flags on a
 *    known tunnel) or punt (no tunnel),
 *  - stash group-policy flags and sclass in the buffer metadata, set the
 *    RX interface to the tunnel interface and bump counters.
 *
 * @param is_ip4  1 for the IPv4 node, 0 for the IPv6 node.
 * @return number of packets processed from the frame.
 */
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
		 vlib_node_runtime_t * node,
		 vlib_frame_t * from_frame, u8 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  vlib_combined_counter_main_t *drop_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();

  /* 0xff-fill the tunnel cache so the first packet always misses. */
  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);

  u32 next_index = node->cached_next_index;

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual-buffer loop. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  u32 bi0 = to_next[0] = from[0];
	  u32 bi1 = to_next[1] = from[1];
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  vlib_buffer_t *b0, *b1;
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  /* udp leaves current_data pointing at the vxlan_gbp header */
	  void *cur0 = vlib_buffer_get_current (b0);
	  void *cur1 = vlib_buffer_get_current (b1);
	  vxlan_gbp_header_t *vxlan_gbp0 = cur0;
	  vxlan_gbp_header_t *vxlan_gbp1 = cur1;

	  /* Recover the outer IP header, which sits udp+ip bytes before
	   * current_data. */
	  ip4_header_t *ip4_0, *ip4_1;
	  ip6_header_t *ip6_0, *ip6_1;
	  if (is_ip4)
	    {
	      ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	      ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	    }
	  else
	    {
	      ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	      ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	    }

	  u32 fi0 = buf_fib_index (b0, is_ip4);
	  u32 fi1 = buf_fib_index (b1, is_ip4);

	  vxlan_gbp_tunnel_t *t0, *t1;
	  if (is_ip4)
	    {
	      t0 =
		vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
	      t1 =
		vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1);
	    }
	  else
	    {
	      t0 =
		vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);
	      t1 =
		vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1);
	    }

	  /* Byte counts for the interface counters, taken before the
	   * vxlan header is popped. */
	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u32 len1 = vlib_buffer_length_in_chain (vm, b1);

	  vxlan_gbp_input_next_t next0, next1;
	  u8 error0 = 0, error1 = 0;
	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
	  u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  /* pop vxlan_gbp */
	  vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
	  vlib_buffer_advance (b1, sizeof *vxlan_gbp1);

	  /* Both the I (valid VNI) and G (group policy) bits must be set. */
	  u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);
	  u8 i_and_g1 = ((flags1 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);

	  /* Validate VXLAN_GBP tunnel encap-fib index against packet */
	  if (PREDICT_FALSE (t0 == NULL || !i_and_g0))
	    {
	      if (t0 != NULL && !i_and_g0)
		{
		  /* Known tunnel but bad header flags: count + drop. */
		  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
		  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
		}
	      else
		{
		  /* No tunnel: punt with an AF-specific punt reason. */
		  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
		  next0 = VXLAN_GBP_INPUT_NEXT_PUNT;
		  if (is_ip4)
		    b0->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
		  else
		    b0->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
		}
	      b0->error = node->errors[error0];
	    }
	  else
	    {
	      next0 = vxlan_gbp_tunnel_get_next (t0, b0);

	      /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
	      vlib_increment_combined_counter
		(rx_counter, thread_index, t0->sw_if_index, 1, len0);
	      pkts_decapsulated++;
	    }

	  /* Propagate group-policy metadata (R bit forced on) regardless
	   * of whether the packet is dropped/punted. */
	  vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
					  VXLAN_GBP_GPFLAGS_R);
	  vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);


	  /* Same handling for the second buffer. */
	  if (PREDICT_FALSE (t1 == NULL || !i_and_g1))
	    {
	      if (t1 != NULL && !i_and_g1)
		{
		  error1 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, t1->sw_if_index, 1, len1);
		  next1 = VXLAN_GBP_INPUT_NEXT_DROP;
		}
	      else
		{
		  error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
		  next1 = VXLAN_GBP_INPUT_NEXT_PUNT;
		  if (is_ip4)
		    b1->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
		  else
		    b1->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
		}
	      b1->error = node->errors[error1];
	    }
	  else
	    {
	      next1 = vxlan_gbp_tunnel_get_next (t1, b1);

	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
	      vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
	      pkts_decapsulated++;

	      vlib_increment_combined_counter
		(rx_counter, thread_index, t1->sw_if_index, 1, len1);
	    }

	  vnet_buffer2 (b1)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp1) |
					  VXLAN_GBP_GPFLAGS_R);

	  vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);

	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b0);
	  vnet_update_l2_len (b1);

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->next_index = next0;
	      tr->error = error0;
	      /* pool index of the tunnel, ~0 when no tunnel matched */
	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
	    }
	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->next_index = next1;
	      tr->error = error1;
	      tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      /* Single-buffer loop: same steps as above, one packet at a time. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = to_next[0] = from[0];
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

	  /* udp leaves current_data pointing at the vxlan_gbp header */
	  void *cur0 = vlib_buffer_get_current (b0);
	  vxlan_gbp_header_t *vxlan_gbp0 = cur0;
	  ip4_header_t *ip4_0;
	  ip6_header_t *ip6_0;
	  if (is_ip4)
	    ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	  else
	    ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

	  u32 fi0 = buf_fib_index (b0, is_ip4);

	  vxlan_gbp_tunnel_t *t0;
	  if (is_ip4)
	    t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
	  else
	    t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);

	  uword len0 = vlib_buffer_length_in_chain (vm, b0);

	  vxlan_gbp_input_next_t next0;
	  u8 error0 = 0;
	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);

	  /* pop (ip, udp, vxlan_gbp) */
	  vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));

	  u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);

	  /* Validate VXLAN_GBP tunnel encap-fib index against packet */
	  if (PREDICT_FALSE (t0 == NULL || !i_and_g0))
	    {
	      if (t0 != NULL && !i_and_g0)
		{
		  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
		  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
		}
	      else
		{
		  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
		  next0 = VXLAN_GBP_INPUT_NEXT_PUNT;
		  if (is_ip4)
		    b0->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
		  else
		    b0->punt_reason =
		      vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
		}
	      b0->error = node->errors[error0];
	    }
	  else
	    {
	      next0 = vxlan_gbp_tunnel_get_next (t0, b0);
	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
	      pkts_decapsulated++;

	      vlib_increment_combined_counter
		(rx_counter, thread_index, t0->sw_if_index, 1, len0);
	    }
	  vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
					  VXLAN_GBP_GPFLAGS_R);

	  vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);

	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b0);

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr
		= vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->next_index = next0;
	      tr->error = error0;
	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  u32 node_idx =
    is_ip4 ? vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_GBP_ERROR_DECAPSULATED,
			       pkts_decapsulated);

  return from_frame->n_vectors;
}
518
/* IPv4 vxlan-gbp decap node entry point: thin wrapper over the shared
 * vxlan_gbp_input() worker with is_ip4 = 1. */
VLIB_NODE_FN (vxlan4_gbp_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1);
}
525
/* IPv6 vxlan-gbp decap node entry point: thin wrapper over the shared
 * vxlan_gbp_input() worker with is_ip4 = 0. */
VLIB_NODE_FN (vxlan6_gbp_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0);
}
532
533static char *vxlan_gbp_error_strings[] = {
534#define vxlan_gbp_error(n,s) s,
535#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
536#undef vxlan_gbp_error
537#undef _
538};
539
540/* *INDENT-OFF* */
/* Graph-node registration for vxlan4-gbp-input; next nodes come from the
 * foreach_vxlan_gbp_input_next list. */
VLIB_REGISTER_NODE (vxlan4_gbp_input_node) =
{
  .name = "vxlan4-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .format_trace = format_vxlan_gbp_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
};
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200555
/* Graph-node registration for vxlan6-gbp-input; mirrors the IPv4 node. */
VLIB_REGISTER_NODE (vxlan6_gbp_input_node) =
{
  .name = "vxlan6-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
  .format_trace = format_vxlan_gbp_rx_trace,
};
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200570/* *INDENT-ON* */
571
/* Next-node indices for the ip4/ip6 vxlan-gbp-bypass feature nodes. */
typedef enum
{
  IP_VXLAN_GBP_BYPASS_NEXT_DROP,	/* drop the packet */
  IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP,	/* hand off to vxlan-gbp-input */
  IP_VXLAN_GBP_BYPASS_N_NEXT,		/* number of next nodes */
} ip_vxlan_gbp_bypass_next_t;
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200578
579always_inline uword
580ip_vxlan_gbp_bypass_inline (vlib_main_t * vm,
581 vlib_node_runtime_t * node,
582 vlib_frame_t * frame, u32 is_ip4)
583{
584 vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
585 u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
586 vlib_node_runtime_t *error_node =
587 vlib_node_get_runtime (vm, ip4_input_node.index);
588 ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
589 ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
590
591 from = vlib_frame_vector_args (frame);
592 n_left_from = frame->n_vectors;
593 next_index = node->cached_next_index;
594
595 if (node->flags & VLIB_NODE_FLAG_TRACE)
596 ip4_forward_next_trace (vm, node, frame, VLIB_TX);
597
598 if (is_ip4)
599 addr4.data_u32 = ~0;
600 else
601 ip6_address_set_zero (&addr6);
602
603 while (n_left_from > 0)
604 {
605 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
606
607 while (n_left_from >= 4 && n_left_to_next >= 2)
608 {
609 vlib_buffer_t *b0, *b1;
610 ip4_header_t *ip40, *ip41;
611 ip6_header_t *ip60, *ip61;
612 udp_header_t *udp0, *udp1;
613 u32 bi0, ip_len0, udp_len0, flags0, next0;
614 u32 bi1, ip_len1, udp_len1, flags1, next1;
615 i32 len_diff0, len_diff1;
616 u8 error0, good_udp0, proto0;
617 u8 error1, good_udp1, proto1;
618
619 /* Prefetch next iteration. */
620 {
621 vlib_buffer_t *p2, *p3;
622
623 p2 = vlib_get_buffer (vm, from[2]);
624 p3 = vlib_get_buffer (vm, from[3]);
625
626 vlib_prefetch_buffer_header (p2, LOAD);
627 vlib_prefetch_buffer_header (p3, LOAD);
628
629 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
630 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
631 }
632
633 bi0 = to_next[0] = from[0];
634 bi1 = to_next[1] = from[1];
635 from += 2;
636 n_left_from -= 2;
637 to_next += 2;
638 n_left_to_next -= 2;
639
640 b0 = vlib_get_buffer (vm, bi0);
641 b1 = vlib_get_buffer (vm, bi1);
642 if (is_ip4)
643 {
644 ip40 = vlib_buffer_get_current (b0);
645 ip41 = vlib_buffer_get_current (b1);
646 }
647 else
648 {
649 ip60 = vlib_buffer_get_current (b0);
650 ip61 = vlib_buffer_get_current (b1);
651 }
652
653 /* Setup packet for next IP feature */
654 vnet_feature_next (&next0, b0);
655 vnet_feature_next (&next1, b1);
656
657 if (is_ip4)
658 {
659 /* Treat IP frag packets as "experimental" protocol for now
660 until support of IP frag reassembly is implemented */
661 proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
662 proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
663 }
664 else
665 {
666 proto0 = ip60->protocol;
667 proto1 = ip61->protocol;
668 }
669
670 /* Process packet 0 */
671 if (proto0 != IP_PROTOCOL_UDP)
672 goto exit0; /* not UDP packet */
673
674 if (is_ip4)
675 udp0 = ip4_next_header (ip40);
676 else
677 udp0 = ip6_next_header (ip60);
678
679 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
680 goto exit0; /* not VXLAN_GBP packet */
681
682 /* Validate DIP against VTEPs */
683 if (is_ip4)
684 {
685 if (addr4.as_u32 != ip40->dst_address.as_u32)
686 {
687 if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
688 goto exit0; /* no local VTEP for VXLAN_GBP packet */
689 addr4 = ip40->dst_address;
690 }
691 }
692 else
693 {
694 if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
695 {
696 if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
697 goto exit0; /* no local VTEP for VXLAN_GBP packet */
698 addr6 = ip60->dst_address;
699 }
700 }
701
702 flags0 = b0->flags;
703 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
704
705 /* Don't verify UDP checksum for packets with explicit zero checksum. */
706 good_udp0 |= udp0->checksum == 0;
707
708 /* Verify UDP length */
709 if (is_ip4)
710 ip_len0 = clib_net_to_host_u16 (ip40->length);
711 else
712 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
713 udp_len0 = clib_net_to_host_u16 (udp0->length);
714 len_diff0 = ip_len0 - udp_len0;
715
716 /* Verify UDP checksum */
717 if (PREDICT_FALSE (!good_udp0))
718 {
719 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
720 {
721 if (is_ip4)
722 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
723 else
724 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
725 good_udp0 =
726 (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
727 }
728 }
729
730 if (is_ip4)
731 {
732 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
733 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
734 }
735 else
736 {
737 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
738 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
739 }
740
741 next0 = error0 ?
742 IP_VXLAN_GBP_BYPASS_NEXT_DROP :
743 IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
744 b0->error = error0 ? error_node->errors[error0] : 0;
745
746 /* vxlan-gbp-input node expect current at VXLAN_GBP header */
747 if (is_ip4)
748 vlib_buffer_advance (b0,
749 sizeof (ip4_header_t) +
750 sizeof (udp_header_t));
751 else
752 vlib_buffer_advance (b0,
753 sizeof (ip6_header_t) +
754 sizeof (udp_header_t));
755
756 exit0:
757 /* Process packet 1 */
758 if (proto1 != IP_PROTOCOL_UDP)
759 goto exit1; /* not UDP packet */
760
761 if (is_ip4)
762 udp1 = ip4_next_header (ip41);
763 else
764 udp1 = ip6_next_header (ip61);
765
766 if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
767 goto exit1; /* not VXLAN_GBP packet */
768
769 /* Validate DIP against VTEPs */
770 if (is_ip4)
771 {
772 if (addr4.as_u32 != ip41->dst_address.as_u32)
773 {
774 if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
775 goto exit1; /* no local VTEP for VXLAN_GBP packet */
776 addr4 = ip41->dst_address;
777 }
778 }
779 else
780 {
781 if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
782 {
783 if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
784 goto exit1; /* no local VTEP for VXLAN_GBP packet */
785 addr6 = ip61->dst_address;
786 }
787 }
788
789 flags1 = b1->flags;
790 good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
791
792 /* Don't verify UDP checksum for packets with explicit zero checksum. */
793 good_udp1 |= udp1->checksum == 0;
794
795 /* Verify UDP length */
796 if (is_ip4)
797 ip_len1 = clib_net_to_host_u16 (ip41->length);
798 else
799 ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
800 udp_len1 = clib_net_to_host_u16 (udp1->length);
801 len_diff1 = ip_len1 - udp_len1;
802
803 /* Verify UDP checksum */
804 if (PREDICT_FALSE (!good_udp1))
805 {
806 if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
807 {
808 if (is_ip4)
809 flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
810 else
811 flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
812 good_udp1 =
813 (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
814 }
815 }
816
817 if (is_ip4)
818 {
819 error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
820 error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
821 }
822 else
823 {
824 error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
825 error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
826 }
827
828 next1 = error1 ?
829 IP_VXLAN_GBP_BYPASS_NEXT_DROP :
830 IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
831 b1->error = error1 ? error_node->errors[error1] : 0;
832
833 /* vxlan_gbp-input node expect current at VXLAN_GBP header */
834 if (is_ip4)
835 vlib_buffer_advance (b1,
836 sizeof (ip4_header_t) +
837 sizeof (udp_header_t));
838 else
839 vlib_buffer_advance (b1,
840 sizeof (ip6_header_t) +
841 sizeof (udp_header_t));
842
843 exit1:
844 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
845 to_next, n_left_to_next,
846 bi0, bi1, next0, next1);
847 }
848
849 while (n_left_from > 0 && n_left_to_next > 0)
850 {
851 vlib_buffer_t *b0;
852 ip4_header_t *ip40;
853 ip6_header_t *ip60;
854 udp_header_t *udp0;
855 u32 bi0, ip_len0, udp_len0, flags0, next0;
856 i32 len_diff0;
857 u8 error0, good_udp0, proto0;
858
859 bi0 = to_next[0] = from[0];
860 from += 1;
861 n_left_from -= 1;
862 to_next += 1;
863 n_left_to_next -= 1;
864
865 b0 = vlib_get_buffer (vm, bi0);
866 if (is_ip4)
867 ip40 = vlib_buffer_get_current (b0);
868 else
869 ip60 = vlib_buffer_get_current (b0);
870
871 /* Setup packet for next IP feature */
872 vnet_feature_next (&next0, b0);
873
874 if (is_ip4)
875 /* Treat IP4 frag packets as "experimental" protocol for now
876 until support of IP frag reassembly is implemented */
877 proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
878 else
879 proto0 = ip60->protocol;
880
881 if (proto0 != IP_PROTOCOL_UDP)
882 goto exit; /* not UDP packet */
883
884 if (is_ip4)
885 udp0 = ip4_next_header (ip40);
886 else
887 udp0 = ip6_next_header (ip60);
888
889 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
890 goto exit; /* not VXLAN_GBP packet */
891
892 /* Validate DIP against VTEPs */
893 if (is_ip4)
894 {
895 if (addr4.as_u32 != ip40->dst_address.as_u32)
896 {
897 if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
898 goto exit; /* no local VTEP for VXLAN_GBP packet */
899 addr4 = ip40->dst_address;
900 }
901 }
902 else
903 {
904 if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
905 {
906 if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
907 goto exit; /* no local VTEP for VXLAN_GBP packet */
908 addr6 = ip60->dst_address;
909 }
910 }
911
912 flags0 = b0->flags;
913 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
914
915 /* Don't verify UDP checksum for packets with explicit zero checksum. */
916 good_udp0 |= udp0->checksum == 0;
917
918 /* Verify UDP length */
919 if (is_ip4)
920 ip_len0 = clib_net_to_host_u16 (ip40->length);
921 else
922 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
923 udp_len0 = clib_net_to_host_u16 (udp0->length);
924 len_diff0 = ip_len0 - udp_len0;
925
926 /* Verify UDP checksum */
927 if (PREDICT_FALSE (!good_udp0))
928 {
929 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
930 {
931 if (is_ip4)
932 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
933 else
934 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
935 good_udp0 =
936 (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
937 }
938 }
939
940 if (is_ip4)
941 {
942 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
943 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
944 }
945 else
946 {
947 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
948 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
949 }
950
951 next0 = error0 ?
952 IP_VXLAN_GBP_BYPASS_NEXT_DROP :
953 IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
954 b0->error = error0 ? error_node->errors[error0] : 0;
955
956 /* vxlan_gbp-input node expect current at VXLAN_GBP header */
957 if (is_ip4)
958 vlib_buffer_advance (b0,
959 sizeof (ip4_header_t) +
960 sizeof (udp_header_t));
961 else
962 vlib_buffer_advance (b0,
963 sizeof (ip6_header_t) +
964 sizeof (udp_header_t));
965
966 exit:
967 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
968 to_next, n_left_to_next,
969 bi0, next0);
970 }
971
972 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
973 }
974
975 return frame->n_vectors;
976}
977
Filip Tehlare1714d32019-03-05 03:01:43 -0800978VLIB_NODE_FN (ip4_vxlan_gbp_bypass_node) (vlib_main_t * vm,
979 vlib_node_runtime_t * node,
980 vlib_frame_t * frame)
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200981{
982 return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
983}
984
985/* *INDENT-OFF* */
986VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) =
987{
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200988 .name = "ip4-vxlan-gbp-bypass",
989 .vector_size = sizeof (u32),
990 .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
991 .next_nodes = {
992 [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
993 [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan4-gbp-input",
994 },
995 .format_buffer = format_ip4_header,
996 .format_trace = format_ip4_forward_next_trace,
997};
Mohsin Kazmi61b94c62018-08-20 18:32:39 +0200998/* *INDENT-ON* */
999
Filip Tehlare1714d32019-03-05 03:01:43 -08001000#ifndef CLIB_MARCH_VARIANT
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001001/* Dummy init function to get us linked in. */
1002clib_error_t *
1003ip4_vxlan_gbp_bypass_init (vlib_main_t * vm)
1004{
1005 return 0;
1006}
1007
1008VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init);
Filip Tehlare1714d32019-03-05 03:01:43 -08001009#endif /* CLIB_MARCH_VARIANT */
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001010
Filip Tehlare1714d32019-03-05 03:01:43 -08001011VLIB_NODE_FN (ip6_vxlan_gbp_bypass_node) (vlib_main_t * vm,
1012 vlib_node_runtime_t * node,
1013 vlib_frame_t * frame)
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001014{
1015 return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1016}
1017
1018/* *INDENT-OFF* */
1019VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) =
1020{
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001021 .name = "ip6-vxlan-gbp-bypass",
1022 .vector_size = sizeof (u32),
1023 .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
1024 .next_nodes = {
1025 [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
1026 [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan6-gbp-input",
1027 },
1028 .format_buffer = format_ip6_header,
1029 .format_trace = format_ip6_forward_next_trace,
1030};
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001031/* *INDENT-ON* */
1032
Filip Tehlare1714d32019-03-05 03:01:43 -08001033#ifndef CLIB_MARCH_VARIANT
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001034/* Dummy init function to get us linked in. */
1035clib_error_t *
1036ip6_vxlan_gbp_bypass_init (vlib_main_t * vm)
1037{
1038 return 0;
1039}
1040
1041VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init);
Filip Tehlare1714d32019-03-05 03:01:43 -08001042#endif /* CLIB_MARCH_VARIANT */
Mohsin Kazmi61b94c62018-08-20 18:32:39 +02001043
1044/*
1045 * fd.io coding-style-patch-verification: ON
1046 *
1047 * Local Variables:
1048 * eval: (c-set-style "gnu")
1049 * End:
1050 */