/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Functions for encapsulating VXLAN GPE tunnels
 *
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>

/** Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

/**
 * @brief VXLAN GPE encap error strings
 */
static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};

/**
 * @brief Enum of VXLAN GPE encap error/counter codes
 */
typedef enum {
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
  VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;
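
/*
 * With the single entry currently in foreach_vxlan_gpe_encap_error, the
 * X-macros above expand to:
 *
 *   static char * vxlan_gpe_encap_error_strings[] =
 *     { "good packets encapsulated" };
 *   enum { VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED = 0,
 *          VXLAN_GPE_ENCAP_N_ERROR };
 *
 * Adding a counter only requires a new _(SYM, "description") line in the
 * foreach list; the strings array and enum stay in sync automatically.
 */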

/**
 * @brief Struct for tracing VXLAN GPE encapsulated packets
 */
typedef struct {
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;

/**
 * @brief Format a trace record for a packet encapsulated in VXLAN GPE
 *
 * @param *s formatted string to append to
 * @param *args va_list carrying vm, node, and the trace record
 *
 * @return *s the formatted string
 *
 */
u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t * t
      = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}

/**
 * @brief Instantiates the UDP + VXLAN-GPE header, then sets the next node to IP4|6 lookup
 *
 * @param *ngm VXLAN GPE main struct
 * @param *b0 buffer to encapsulate
 * @param *t0 tunnel; contains rewrite header
 * @param *next0 relative index of next dispatch function (next node)
 * @param is_v4 Is this IPv4? (or IPv6)
 *
 */
always_inline void
vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vxlan_gpe_tunnel_t * t0, u32 * next0,
                            u8 is_v4)
{
  ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
  ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);

  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
  next0[0] = t0->encap_next_node;
}
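
/*
 * A sketch of what the helper above does (see ip_udp_encap_one in vnet):
 * it moves the buffer's current_data pointer back by rewrite_size, copies
 * the precomputed IP + UDP + VXLAN-GPE rewrite in front of the payload,
 * and patches the outer IP total length (plus the IPv4 header checksum)
 * and the UDP length to match the new buffer length. The tunnel's rewrite
 * is built once at tunnel-creation time, so the per-packet cost here is
 * essentially one header copy and a few fixups.
 */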

/**
 * @brief Instantiates the UDP + VXLAN-GPE headers, then sets the next node to IP4|6 lookup, for two packets
 *
 * @param *ngm VXLAN GPE main struct
 * @param *b0 Packet0
 * @param *b1 Packet1
 * @param *t0 tunnel for Packet0; contains rewrite header
 * @param *t1 tunnel for Packet1; contains rewrite header
 * @param *next0 relative index of next dispatch function (next node) for Packet0
 * @param *next1 relative index of next dispatch function (next node) for Packet1
 * @param is_v4 Is this IPv4? (or IPv6)
 *
 */
always_inline void
vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
                            vxlan_gpe_tunnel_t * t1, u32 * next0,
                            u32 * next1, u8 is_v4)
{
  ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
  ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);

  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
  next0[0] = next1[0] = t0->encap_next_node;
}
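
/*
 * Note: both next0 and next1 take t0->encap_next_node. This relies on the
 * caller only pairing tunnels of the same address family (the
 * is_ip4_0 == is_ip4_1 check in vxlan_gpe_encap below), so t0 and t1
 * resolve to the same IP4|6 lookup node; it is an invariant of the caller,
 * not something this function checks.
 */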

/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "establish local". This means that we don't have a TX interface yet,
 * as we first need to look up where the outer-header destination is. By setting
 * the TX index in the buffer metadata to the encap FIB, we can do a lookup to get
 * the adjacency and real TX.
 *
 *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
 *
 * @node vxlan-gpe-encap
 * @param *vm the vlib_main_t
 * @param *node the node runtime
 * @param *from_frame frame of buffers to encapsulate
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
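  /* Note: stats_sw_if_index is seeded from node->runtime_data[0] and written
     back at the bottom of this function, so the combined-counter batching
     below can carry over across frames when consecutive frames hit the same
     tunnel interface. */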

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, *b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, *hi1;
          vxlan_gpe_tunnel_t * t0, *t1;
          u8 is_ip4_0, is_ip4_1;

          next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
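
          /* Prefetching from[2]/from[3] (two packets ahead) means the buffer
             headers and the first cache lines of packet data are warm by the
             time the next iteration touches them, hiding memory latency
             behind the work done on the current pair b0/b1. */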

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
          hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b1)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
          t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
          is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);

          if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
            {
              vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1, is_ip4_0);
            }
          else
            {
              vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
              vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
            }

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
                             || (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter (
                        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                        thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1,
                                                            sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }

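          /* Both buffers were speculatively queued to next_index above; the
             validate macro below re-homes any buffer whose computed next0/next1
             differs (e.g. an IPv6 tunnel while the cached next is ip4-lookup). */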
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          vxlan_gpe_tunnel_t * t0;
          u8 is_ip4_0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);

          vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated++;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
           * incremented per packet. Note stats are still incremented for deleted
           * and admin-down tunnel where packets are dropped. It is not worthwhile
           * to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                    thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                            sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, thread_index,
          stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
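
/*
 * Illustrative usage (not part of this file): with the tunnel-creation CLI
 * that accompanies this node elsewhere in the vxlan-gpe code, a tunnel whose
 * encap path runs through vxlan-gpe-encap can be set up along these lines
 * (addresses and VNI are placeholders):
 *
 *   create vxlan-gpe tunnel local 192.168.1.1 remote 192.168.1.2 vni 13
 *
 * Packets routed into the resulting tunnel interface are rewritten here and
 * handed to ip4-lookup / ip6-lookup via the encap FIB.
 */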