/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
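/*
 * VXLAN-GBP encapsulation nodes: prepend each tunnel's precomputed
 * IP4/IP6 + UDP + VXLAN-GBP rewrite to frames transmitted on a
 * vxlan-gbp tunnel interface and forward them via the tunnel's next DPO.
 */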
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
  VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

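/* Per-packet trace: the tunnel used plus the VNI, GBP source class and
 * GBP flags written into the VXLAN-GBP header */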
typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %d",
            t->tunnel_index, t->vni, t->sclass, t->flags);
  return s;
}

always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

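  /* The rewrite string appears to be kept right-aligned within rewrite_data,
   * so the underlay header starts rw_hdr_offset bytes into it (see the
   * memcpy workaround below) */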
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

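      /* Dual loop: encapsulate two packets per iteration while at least
       * four remain, so the following pair can be prefetched */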
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

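          /* Tunnel state is cached across packets: re-resolve only when the
           * TX sw_if_index differs from the previous packet */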
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          /* vnet_rewrite_two_header writes only in (uword) 8-byte chunks
           * and would discard the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0,
                            t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);
          clib_memcpy_fast (underlay1,
                            t1->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

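          /* Using the inner L2 flow hash as the UDP source port gives the
           * underlay per-flow entropy for load balancing (ECMP/RSS) */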
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);

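          /* With checksum offload, only flag the buffer and record the
           * header offsets; the checksums are expected to be filled in
           * later (e.g. by the driver) */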
          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
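              /* A computed UDP checksum of zero is transmitted as all ones
               * (RFC 768) */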
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
              tr->flags = vnet_buffer2 (b1)->gbp.flags;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

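      /* Single loop for any remaining packets */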
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          /* vnet_rewrite_one_header writes only in (uword) 8-byte chunks
           * and would discard the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0,
                            t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
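              /* A computed UDP checksum of zero is transmitted as all ones
               * (RFC 768) */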
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_gbp_encap (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  /* Disable checksum offload as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum only, unless the udp checksum
     is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

static uword
vxlan6_gbp_encap (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as the udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .function = vxlan4_gbp_encap,
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_encap_node, vxlan4_gbp_encap)

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .function = vxlan6_gbp_encap,
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_encap_node, vxlan6_gbp_encap)
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */