/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
  GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;
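
/*
 * The foreach_geneve_encap_error X-macro above is expanded twice: once to
 * build the error string table and once to build the matching
 * GENEVE_ENCAP_ERROR_* enum, so the strings and symbols can never get out
 * of sync.
 */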

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;
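
/*
 * Only the drop next is registered statically; the forwarding next index
 * for each packet is taken at runtime from the tunnel's next_dpo.
 */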

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} geneve_encap_trace_t;

u8 *
format_geneve_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  geneve_encap_trace_t *t = va_arg (*args, geneve_encap_trace_t *);

  s = format (s, "GENEVE encap to geneve_tunnel%d vni %d",
	      t->tunnel_index, t->vni);
  return s;
}
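
/*
 * Example trace line as rendered by the formatter above (the values are
 * illustrative): "GENEVE encap to geneve_tunnel0 vni 100"
 */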


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
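
/*
 * The offsets above are u64 word indices into the precomputed rewrite:
 * 4 x 8 = 32 bytes (plus a 4-byte tail copied separately) covers the
 * 36-byte ip4 + udp + GENEVE base header, and 7 x 8 = 56 bytes covers
 * the ip6 + udp + GENEVE base header exactly.
 */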

always_inline uword
geneve_encap_inline (vlib_main_t * vm,
		     vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
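  /*
   * runtime_data[0] carries the sw_if_index of the last tunnel whose TX
   * stats were flushed, so the stats batching below can resume across
   * frames when traffic keeps hitting the same tunnel.
   */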

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
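          /*
           * The block above prefetches two packets ahead (from[2] and
           * from[3]) so their buffer headers and first data cache lines
           * are warm by the time the next loop iteration processes them.
           */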

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
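          /*
           * sw_if_index0/1 and t0/t1 persist across loop iterations, so the
           * hw-interface and tunnel lookups above are skipped while
           * consecutive packets target the same GENEVE interface.
           */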

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));
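          /*
           * Advancing by a negative length moves current_data back,
           * exposing exactly enough headroom in front of the payload for
           * the precomputed encapsulation header to be copied in place.
           */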

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b0);
              ip4_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy the first 32 octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;
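              /*
               * ip_csum_update () applies the standard incremental-checksum
               * rule (RFC 1624): only the delta between the old length (0,
               * from the rewrite template) and the new length is folded in,
               * so the header checksum never has to be recomputed from
               * scratch.
               */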

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
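              /*
               * The assignment truncates the u32 flow hash to 16 bits: the
               * low bits of the L2 flow hash become the UDP source port,
               * providing per-flow entropy for underlay ECMP/RSS as the
               * GENEVE spec recommends.
               */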
            }
          else			/* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b0);
              ip6_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy the first 56 (ip6) octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
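              /*
               * A computed checksum of 0 is transmitted as 0xffff: a zero
               * UDP checksum means "no checksum", which RFC 2460 forbids
               * over IPv6, and the two encodings are equivalent in one's
               * complement arithmetic.
               */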
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increments on the same GENEVE tunnel so counters
             are not incremented per packet. Note that stats are still
             incremented for deleted and admin-down tunnels where packets
             are dropped. It is not worthwhile to check for this rare case
             and affect normal-path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

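      /*
       * Single-packet path: handles the frame tail (or a short next-frame
       * budget) with the same per-packet logic as the dual loop above,
       * minus the pairing and prefetch.
       */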
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy the first 32 octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }
          else			/* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy the first 56 (ip6) octets 8 bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same GENEVE tunnel so counters
             are not incremented per packet. Note that stats are still
             incremented for deleted and admin-down tunnels where packets
             are dropped. It is not worthwhile to check for this rare case
             and affect normal-path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       GENEVE_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
	(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
geneve4_encap (vlib_main_t * vm,
	       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
geneve6_encap (vlib_main_t * vm,
	       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}
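
/*
 * Both wrappers specialize geneve_encap_inline at compile time, so the
 * is_ip4 branches cost nothing per packet. Packets reach these nodes via
 * the interface-output path of a GENEVE tunnel interface, e.g. one
 * created with a CLI along the lines of (illustrative):
 *
 *   create geneve tunnel local 10.0.0.1 remote 10.0.0.2 vni 100
 */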

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .function = geneve4_encap,
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (geneve4_encap_node, geneve4_encap)

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .function = geneve6_encap,
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (geneve6_encap_node, geneve6_encap)
/* *INDENT-ON* */
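
/*
 * VLIB_NODE_FUNCTION_MULTIARCH builds CPU-variant copies of each node
 * function (e.g. AVX2 on x86) and selects the best one at runtime, which
 * is why the registrations above reference the plain function by name.
 */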

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */