/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;
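
/* The only static arc is error-drop: at runtime each packet's next node
 * is taken from its tunnel's next_dpo (see vxlan_encap_inline below), so
 * the encap node needs no per-transport next entries here. */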

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
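
/* The precomputed rewrite is copied as u64 words: the ip4+udp+vxlan
 * header is 36 octets (four u64s plus a trailing u32, handled inline
 * below), the ip6+udp+vxlan header is 56 octets (exactly seven u64s). */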

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t * hi0, * hi1;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
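
          /* The prefetches above run two packets ahead of the pair in
           * flight, so the buffer headers and the first two cache lines
           * of data (the bytes rewritten below) are warm when needed. */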

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
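          /* The inner-packet flow hash becomes the outer UDP source port
           * below, giving underlay ECMP/LAG per-flow entropy as
           * recommended by RFC 7348. */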

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
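          /* Advancing by a negative offset moves current_data back into
           * buffer headroom, exposing space for the precomputed
           * encapsulation header that is copied in below. */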

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip4_vxlan_header_t));

              ip4_0 = vlib_buffer_get_current(b0);
              ip4_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */);
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;
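              /* ip_csum_update applies the RFC 1624 incremental-update
               * rule: the rewrite template carries length 0, so folding
               * the old value (0) out and the real length in yields a
               * correct header checksum without resumming all 20 octets. */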

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip4_1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              ip6_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof(*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip6_1+1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* IPv6 UDP checksum is mandatory (RFC 2460, sec. 8.1); a
               * computed value of zero must be transmitted as 0xffff */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
                                                                 ip6_1, &bogus);
              ASSERT(bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       cpu_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

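      /* Scalar remainder loop: same per-packet work as above, one packet
       * at a time, draining whatever the dual-packet loop left over. */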
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ip4_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory (RFC 2460, sec. 8.1); a
               * computed value of zero must be transmitted as 0xffff */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated ++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
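
  /* Caching the last sw_if_index in runtime_data[0] lets the next frame
   * resume batching on the same tunnel without an initial flush. */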

  return from_frame->n_vectors;
}

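/* Thin wrappers: is_ip4 is a compile-time constant here, so the inline
 * expands into two specialized node functions with the transport branch
 * folded away. */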
static uword
vxlan4_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

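/* VLIB_NODE_FUNCTION_MULTIARCH builds CPU-feature-specific variants of
 * the node function so the best match for the running CPU can be
 * selected at runtime. */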
VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)