/*
 * decap.c - decapsulate VXLAN GPE
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Functions for decapsulating VXLAN GPE tunnels
 *
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>

vlib_node_registration_t vxlan_gpe_input_node;

/**
 * @brief Struct for VXLAN GPE decap packet tracing
 *
 */
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
} vxlan_gpe_rx_trace_t;

/**
 * @brief Tracing function for VXLAN GPE packet decapsulation
 *
 * @param *s vector to append the formatted trace to
 * @param *args va_list containing vm, node and the vxlan_gpe_rx_trace_t record
 *
 * @return *s vector with the formatted trace appended
 *
 */
static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
                  t->next_index, t->error);
    }
  else
    {
      s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
                  t->error);
    }
  return s;
}

/**
 * @brief Tracing function for VXLAN GPE packet decapsulation including length
 *
 * @param *s vector to append the formatted output to
 * @param *args va_list containing vm and node
 *
 * @return *s formatted output vector
 *
 */
static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);


  return s;
}

/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "terminate local". This means that there is no "TX" interface for this
 * decap case, so that field in the buffer metadata can be used for something else.
 * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
 * FIB index used to look up the inner packet's adjacency:
 *
 *   vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
 *
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *from_frame vlib_frame_t of buffers to process
 * @param is_ip4 1 for the IPv4 overlay, 0 for IPv6
 *
 * @return from_frame->n_vectors
 *
 */
always_inline uword
vxlan_gpe_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame,
                 u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t * nngm = &vxlan_gpe_main;
  vnet_main_t * vnm = nngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_gpe_tunnel_key_t last_key4;
  vxlan6_gpe_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    memset (&last_key4, 0xff, sizeof(last_key4));
  else
    memset (&last_key6, 0xff, sizeof(last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

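  /* Standard VPP dispatch pattern: a dual loop that handles two packets per
   * iteration (with prefetch of the following pair), followed by a single
   * loop that drains any remaining packets. */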
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, *b1;
          u32 next0, next1;
          ip4_vxlan_gpe_header_t * iuvn4_0, *iuvn4_1;
          ip6_vxlan_gpe_header_t * iuvn6_0, *iuvn6_1;
          uword * p0, *p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_gpe_tunnel_t * t0, *t1;
          vxlan4_gpe_tunnel_key_t key4_0, key4_1;
          vxlan6_gpe_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
              vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);
              iuvn4_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof(*iuvn4_0));
              vlib_buffer_advance (b1, sizeof(*iuvn4_1));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
              vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);
              iuvn6_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof(*iuvn6_0));
              vlib_buffer_advance (b1, sizeof(*iuvn6_1));
            }

          tunnel_index0 = ~0;
          tunnel_index1 = ~0;
          error0 = 0;
          error1 = 0;

          if (is_ip4)
            {
              next0 =
                (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;
              next1 =
                (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_1.local = iuvn4_1->ip4.dst_address.as_u32;

              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_1.remote = iuvn4_1->ip4.src_address.as_u32;

              key4_0.vni = iuvn4_0->vxlan.vni_res;
              key4_1.vni = iuvn4_1->vxlan.vni_res;

              key4_0.pad = 0;
              key4_1.pad = 0;
            }
          else /* is_ip6 */
            {
              next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
                iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
              next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
                iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
              key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];

              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
              key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];

              key6_0.vni = iuvn6_0->vxlan.vni_res;
              key6_1.vni = iuvn6_1->vxlan.vni_res;
            }

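          /* A single-entry cache: when the key matches the previous packet's
           * key, last_tunnel_index is reused and the hash lookup is skipped. */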
          /* Processing packet 0 */
          if (is_ip4)
            {
              /* Processing for key4_0 */
              if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
                                || (key4_0.as_u64[1] != last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              next0 =
                (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;
              next1 =
                (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
              key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];

              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
              key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];

              key6_0.vni = iuvn6_0->vxlan.vni_res;
              key6_1.vni = iuvn6_1->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
                {
                  p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  memcpy (&last_key6, &key6_0, sizeof(key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }

          t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /**
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

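          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */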
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0: b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }

          /* Process packet 1 */
          if (is_ip4)
            {
              /* Processing for key4_1 */
              if (PREDICT_FALSE(
                  (key4_1.as_u64[0] != last_key4.as_u64[0])
                  || (key4_1.as_u64[1] != last_key4.as_u64[1])))
                {
                  p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  last_key4.as_u64[0] = key4_1.as_u64[0];
                  last_key4.as_u64[1] = key4_1.as_u64[1];
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              /* Processing for key6_1 */
              if (PREDICT_FALSE(memcmp (&key6_1, &last_key6, sizeof(last_key6)) != 0))
                {
                  p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  memcpy (&last_key6, &key6_1, sizeof(key6_1));
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }

          t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);

          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b1);

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1: b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof(*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0, next1);
        }

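      /* Single-buffer loop: drain any remaining packets one at a time */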
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_vxlan_gpe_header_t * iuvn4_0;
          ip6_vxlan_gpe_header_t * iuvn6_0;
          uword * p0;
          u32 tunnel_index0;
          vxlan_gpe_tunnel_t * t0;
          vxlan4_gpe_tunnel_key_t key4_0;
          vxlan6_gpe_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof(*iuvn4_0));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof(*iuvn6_0));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          if (is_ip4)
            {
              next0 =
                (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_0.vni = iuvn4_0->vxlan.vni_res;
              key4_0.pad = 0;

              /* Processing for key4_0 */
              if (PREDICT_FALSE(
                  (key4_0.as_u64[0] != last_key4.as_u64[0])
                  || (key4_0.as_u64[1] != last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              next0 =
                (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_0.vni = iuvn6_0->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
                {
                  p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  memcpy (&last_key6, &key6_0, sizeof(key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }

          t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00: b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
                               VXLAN_GPE_ERROR_DECAPSULATED, pkts_decapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, thread_index,
          stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
  return from_frame->n_vectors;
}

/**
 * @brief Graph processing dispatch function for IPv4 VXLAN GPE
 *
 * @node vxlan4-gpe-input
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *from_frame vlib_frame_t of buffers to process
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}

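/**
 * @brief Register the decap next node for a VXLAN GPE protocol type
 *
 * Packets whose VXLAN GPE next-protocol field matches protocol_id are
 * dispatched to the registered next index by the decap node.
 *
 * @param protocol_id VXLAN GPE next-protocol value
 * @param next_node_index next-node index used when dispatching matching inner packets
 *
 */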
void
vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
{
  vxlan_gpe_main_t *hm = &vxlan_gpe_main;
  hm->decap_next_node_list[protocol_id] = next_node_index;
  return;
}

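/**
 * @brief Unregister the decap next node for a VXLAN GPE protocol type
 *
 * The protocol's entry is reset to VXLAN_GPE_INPUT_NEXT_DROP, so packets
 * carrying it are dropped until another handler is registered.
 *
 * @param protocol_id VXLAN GPE next-protocol value
 * @param next_node_index unused; kept for symmetry with the register call
 *
 */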
void
vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
{
  vxlan_gpe_main_t *hm = &vxlan_gpe_main;
  hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
  return;
}

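/*
 * Usage sketch (illustrative only): a plugin that terminates a given VXLAN
 * GPE next-protocol value would typically register the next index of its
 * own handling node at init time and unregister it on cleanup, e.g.:
 *
 *   vxlan_gpe_register_decap_protocol (protocol_id, my_next_index);
 *   ...
 *   vxlan_gpe_unregister_decap_protocol (protocol_id, my_next_index);
 *
 * where protocol_id and my_next_index are placeholders for the plugin's
 * own values, not names defined in this file.
 */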

/**
 * @brief Graph processing dispatch function for IPv6 VXLAN GPE
 *
 * @node vxlan6-gpe-input
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *from_frame vlib_frame_t of buffers to process
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}

/**
 * @brief VXLAN GPE error strings
 */
static char * vxlan_gpe_error_strings[] = {
#define vxlan_gpe_error(n,s) s,
#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
#undef vxlan_gpe_error
#undef _
};

VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .function = vxlan4_gpe_input,
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);

VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .function = vxlan6_gpe_input,
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);

typedef enum {
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;

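/**
 * @brief Common IPv4/IPv6 vxlan-gpe-bypass feature node processing
 *
 * UDP packets addressed to a local VTEP on the VXLAN GPE port are validated
 * (UDP length and checksum) and handed directly to vxlan[46]-gpe-input,
 * bypassing the normal ip[46] lookup path. All other packets continue to the
 * next feature node unchanged.
 *
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *frame vlib_frame_t of buffers to process
 * @param is_ip4 1 for the IPv4 feature arc, 0 for IPv6
 *
 * @return frame->n_vectors
 *
 */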
always_inline uword
ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame,
                            u32 is_ip4)
{
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4) addr4.data_u32 = ~0;
  else ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
          vnet_feature_next (vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);

          if (is_ip4)
            {
              proto0 = ip40->protocol;
              proto1 = ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit0; /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit1; /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);

          if (is_ip4)
            proto0 = ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit; /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
                    goto exit; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
                    goto exit; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

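/**
 * @brief Graph processing dispatch function for the IPv4 vxlan-gpe-bypass node
 *
 * @node ip4-vxlan-gpe-bypass
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *frame vlib_frame_t of buffers to process
 *
 * @return frame->n_vectors
 *
 */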
static uword
ip4_vxlan_gpe_bypass (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .function = ip4_vxlan_gpe_bypass,
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);

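/**
 * @brief Graph processing dispatch function for the IPv6 vxlan-gpe-bypass node
 *
 * @node ip6-vxlan-gpe-bypass
 * @param *vm vlib_main_t corresponding to the current thread
 * @param *node vlib_node_runtime_t for this node
 * @param *frame vlib_frame_t of buffers to process
 *
 * @return frame->n_vectors
 *
 */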
static uword
ip6_vxlan_gpe_bypass (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .function = ip6_vxlan_gpe_bypass,
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);