/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
  VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

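/* Per-packet trace record: the tunnel used for encapsulation plus the
 * VNI, source class and GBP flags written into the header. */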
typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %d",
            t->tunnel_index, t->vni, t->sclass, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

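/*
 * Shared encap worker for the IPv4 and IPv6 nodes.  Each buffer gets the
 * tunnel's pre-computed (IP + UDP + VXLAN-GBP) rewrite prepended, then the
 * length, QoS, sclass and gpflags fields are patched and the checksums are
 * either fixed inline or flagged for offload.  is_ip4 and csum_offload are
 * constant at each call site, so the compiler specializes each variant.
 */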
always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

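  /* Sanity-check the encap header sizes: IPv6 (40) + UDP (8) + VXLAN-GBP
   * (8) = 56 bytes; IPv4 (20) + UDP (8) + VXLAN-GBP (8) = 36 bytes. */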
  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
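  /* IPv6 has no header checksum, so only the UDP checksum can be
   * offloaded; IPv4 requests both IP and UDP checksum offload. */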
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

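      /* Dual-buffer loop: process two packets per iteration while
       * prefetching the next two. */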
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

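          /* Tunnel state is cached across packets: when consecutive
           * buffers leave on the same sw_if_index, the previous tunnel,
           * next node and DPO index are reused. */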
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

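          /* The rewrite was copied into the space just ahead of the
           * current data; advancing by -underlay_hdr_len exposes the new
           * encap header. */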
          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

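          /* The low 16 bits of the L2 flow hash become the UDP source
           * port, giving the underlay per-flow entropy for ECMP/LAG
           * hashing (standard VXLAN practice, cf. RFC 7348). */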
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

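          /* sclass carries the source endpoint group and gpflags the
           * policy bits defined by the VXLAN group-based-policy extension
           * (draft-smith-vxlan-group-policy). */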
          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4: the UDP checksum is computed only via offload above
             (it is optional per RFC 768); here just patch the IP header
             checksum incrementally (RFC 1624) for the changed fields. */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6: the UDP checksum is mandatory (RFC 8200); a computed
             value of zero is sent as all-ones, since zero means "no
             checksum" (RFC 768). */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
              tr->flags = vnet_buffer2 (b1)->gbp.flags;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

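      /* Single-buffer loop: identical processing for whatever remains of
       * the frame. */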
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4: the UDP checksum is computed only via offload above;
             here just patch the IP header checksum incrementally
             (RFC 1624). */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the TX node is not
     worthwhile for the IPv4 header checksum alone, unless the UDP
     checksum is also required. */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the UDP checksum is mandatory. */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */