/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

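/*
 * The _() X-macro expands each error entry twice: once into the string
 * table below and once into the VXLAN_GBP_ENCAP_ERROR_<sym> enum, keeping
 * the counter strings and codes in sync.
 */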
static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
  VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %d",
            t->tunnel_index, t->vni, t->sclass, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

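  /* Underlay header sizes: ip6 (40) + udp (8) + vxlan-gbp (8) = 56,
   * ip4 (20) + udp (8) + vxlan-gbp (8) = 36 */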
  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
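  /* The rewrite string is stored right-aligned at the end of rewrite_data,
   * so the live header starts rw_hdr_offset bytes into the rewrite */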
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
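  /* ip6 has no header checksum, so only the UDP checksum can be offloaded;
   * ip4 offloads both the IP header and UDP checksums */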
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
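          /* Dual loop: two packets per iteration; requiring at least four
           * in flight keeps the from[2]/from[3] prefetch targets valid */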
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

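          /* Tunnel state (t0/next0/dpoi_idx0) is cached across packets and
           * refreshed only when the tx sw_if_index changes */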
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          /* vnet_rewrite_two_header writes only in 8-byte (uword) chunks
           * and discards the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0,
                            t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);
          clib_memcpy_fast (underlay1,
                            t1->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;
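          /* (the l2 flow hash in the UDP source port gives the underlay
           *  per-flow entropy for ECMP/RSS, as RFC 7348 recommends) */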

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);
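          /* (sclass carries the source group id defined by the VXLAN Group
           *  Policy extension, draft-smith-vxlan-group-policy) */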

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* Without checksum offload, only the IP4 header checksum is
           * updated (incrementally); a zero UDP checksum is valid for IPv4 */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
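              /* RFC 768: a computed checksum of zero is transmitted as
               * all ones */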
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
              tr->flags = vnet_buffer2 (b1)->gbp.flags;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          /* vnet_rewrite_one_header writes only in 8-byte (uword) chunks
           * and discards the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0,
                            t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* Without checksum offload, only the IP4 header checksum is
           * updated (incrementally); a zero UDP checksum is valid for IPv4 */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
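              /* RFC 768: a computed checksum of zero is transmitted as
               * all ones */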
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */