/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

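/*
 * Error counters are declared with an X-macro: each _(sym, string)
 * entry expands once into the string table and once into the error
 * enum below, so the two stay in sync by construction.
 */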
/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
    VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
            t->tunnel_index, t->vni, t->sclass,
            format_vxlan_gbp_header_gpflags, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

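/*
 * Shared worker for the v4 and v6 encap nodes.  is_ip4 selects the
 * underlay header template and csum_offload chooses between computing
 * checksums here or deferring them to hardware via buffer offload
 * flags; both are compile-time constants at each call site, so the
 * branches below are folded away.
 */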
always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

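  /* Underlay header sizes: IPv4 20 + UDP 8 + VXLAN-GBP 8 = 36 bytes;
     IPv6 40 + UDP 8 + VXLAN-GBP 8 = 56 bytes. */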
  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
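  /* For IPv4 both the IP header and UDP checksums can be offloaded;
     IPv6 has no header checksum, so only the UDP checksum applies. */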
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

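  /* Dual loop: encapsulate two packets per iteration while prefetching
     two more ahead; the single loop below handles the remainder. */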
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
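          /* The L2 flow hash becomes the UDP source port below, giving
             the underlay per-flow entropy for ECMP/LAG load balancing. */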

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

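          /* The tunnel rewrite is the complete, precomputed underlay
             header (IP + UDP + VXLAN-GBP); prepend it, then patch the
             per-packet fields (lengths, checksums, sclass) below. */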
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

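          /* The GBP extension reuses reserved fields of the plain VXLAN
             header to carry the source group (sclass) and policy flags,
             so the receiving end can apply group-based policy. */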
          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* Without offload, patch the IP4 header checksum incrementally;
             the IPv4 UDP checksum is optional and is not computed here */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
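              /* A checksum of 0 means "no checksum" on the wire (RFC 768),
                 which IPv6 forbids for UDP, so send a computed 0 as all
                 ones instead */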
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
              tr->flags = vnet_buffer2 (b1)->gbp.flags;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

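      /* Single loop: encapsulate whatever the dual loop left over */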
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* Without offload, patch the IP4 header checksum incrementally;
             the IPv4 UDP checksum is optional and is not computed here */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp
     checksum is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

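/*
 * Node registrations.  The only static next is error-drop; for normal
 * traffic the per-packet next index comes from the tunnel's next_dpo,
 * resolved above in vxlan_gbp_encap_inline.
 */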
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */