/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

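/*
 * This X-macro list is expanded twice below: once to build the
 * human-readable counter strings and once to build the matching
 * GENEVE_ENCAP_ERROR_* enum values, keeping the two in sync.
 */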
static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
  GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;
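
/*
 * Only the drop next is registered statically; the actual next node is
 * taken at runtime from each tunnel's next_dpo (see geneve_encap_inline
 * below).
 */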

#define foreach_fixed_header4_offset \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
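
/*
 * These offset lists drive 8-byte copies of the precomputed rewrite
 * below: 4 x u64 = 32 octets covers IPv4 (20) + UDP (8) + the first 4
 * octets of the 8-octet GENEVE base header (the remaining 4 octets are
 * copied separately), while 7 x u64 = 56 octets covers IPv6 (40) +
 * UDP (8) + GENEVE base (8) exactly.
 */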
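/*
 * Shared encap worker for the IPv4 and IPv6 nodes; is_ip4 is a
 * compile-time constant, so each instantiation keeps only one address
 * family's code. The usual VPP pattern is used: a dual-packet loop with
 * prefetch of the next two buffers, followed by a single-packet loop
 * for the remainder.
 */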
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ASSERT (t0 != NULL);

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }

          ASSERT (t1 != NULL);

          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

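          /*
           * Prepend the precomputed encap header: advancing by a negative
           * offset moves the buffer's current-data pointer back by the
           * rewrite length, opening headroom for the IP/UDP/GENEVE template
           * copied in below.
           */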
          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b0);
              ip4_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

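              /*
               * Incremental checksum update (RFC 1624): only the IPv4
               * total-length field differs from the rewrite template, so
               * fold HC' = ~(~HC + ~m + m') instead of recomputing the
               * whole header checksum. old_l0/old_l1 stay 0 because the
               * template is built with a zero length field.
               */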
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

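              /*
               * The UDP source port is the low 16 bits of the L2 flow
               * hash, giving per-flow entropy so ECMP/RSS on transit
               * devices can spread tunneled flows across paths.
               */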
              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b0);
              ip6_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /*
               * The IPv6 UDP checksum is mandatory (RFC 8200); a computed
               * value of zero is sent as all-ones, since zero on the wire
               * means "no checksum" (RFC 768).
               */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch the stats increments on the same GENEVE tunnel so the
             counter is not incremented per packet. Note stats are still
             incremented for deleted and admin-down tunnels, where packets
             are dropped; it is not worthwhile to check for this rare case
             and slow down the normal path. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

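      /*
       * Single-packet tail loop: the same per-packet work as the
       * dual-packet loop above, without pairing or prefetch.
       */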
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch the stats increments on the same GENEVE tunnel so the
             counter is not incremented per packet. Note stats are still
             incremented for deleted and admin-down tunnels, where packets
             are dropped; it is not worthwhile to check for this rare case
             and slow down the normal path. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

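/*
 * VLIB_NODE_FN generates the node dispatch function (and, where
 * multi-architecture builds are enabled, per-ISA variants); the shared
 * inline body is specialized per address family through the constant
 * is_ip4 argument.
 */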
VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */