/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
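
/* The rewrite sizes these offset macros cover (per RFC 7348 the VXLAN
   header is 8 octets and rides on an 8-octet UDP header):
     ip4 rewrite: 20 (IPv4) + 8 (UDP) + 8 (VXLAN) = 36 octets
                  -> four u64 copies (32 octets) + one trailing u32
     ip6 rewrite: 40 (IPv6) + 8 (UDP) + 8 (VXLAN) = 56 octets
                  -> seven u64 copies */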

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t * hi0, * hi1;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

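      /* Dual-loop: handle two packets per iteration while at least four
         buffers remain, so the two buffers prefetched below are warm by
         the time the next iteration touches them; the single-loop below
         drains whatever is left. */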
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
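          /* from[2] and from[3] are the buffers the next loop iteration
             will process; warming their metadata and first two cache lines
             of packet data here hides most of the memory latency. */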

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

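          /* The tunnel lookup above is cached in sw_if_index0/t0 (and
             sw_if_index1/t1) across loop iterations: tunneled traffic tends
             to arrive in bursts, so the hw-interface lookup is repeated
             only when the TX interface actually changes. */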
          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip4_vxlan_header_t));

              ip4_0 = vlib_buffer_get_current(b0);
              ip4_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

              /* Fix the IP4 checksum and length */
              if (csum_offload)
                {
                  ip4_0->length = clib_host_to_net_u16
                    (vlib_buffer_length_in_chain (vm, b0));
                  b0->flags |=
                    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4;
                  vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip4_0 - b0->data;
                  ip4_1->length = clib_host_to_net_u16
                    (vlib_buffer_length_in_chain (vm, b1));
                  b1->flags |=
                    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4;
                  vnet_buffer (b1)->l3_hdr_offset = (u8 *) ip4_1 - b1->data;
                }
              else
                {
                  sum0 = ip4_0->checksum;
                  new_l0 = /* old_l0 always 0, see the rewrite setup */
                    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
                  sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                         length /* changed member */);
                  ip4_0->checksum = ip_csum_fold (sum0);
                  ip4_0->length = new_l0;
                  sum1 = ip4_1->checksum;
                  new_l1 = /* old_l1 always 0, see the rewrite setup */
                    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
                  sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                         length /* changed member */);
                  ip4_1->checksum = ip_csum_fold (sum1);
                  ip4_1->length = new_l1;
                }
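              /* ip_csum_update applies an RFC 1624-style incremental update,
                 HC' = ~(~HC + ~m + m'), with m = the length field in the
                 precomputed rewrite (always 0) and m' = the real total
                 length, so the IP4 header checksum is never recomputed from
                 scratch on the data path. */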

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip4_1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
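              /* The u16 store keeps the low 16 bits of the inner flow hash,
                 giving ECMP/RSS entropy in the outer source port as
                 recommended by RFC 7348; the destination port in the
                 rewrite stays the well-known VXLAN port. */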

              /* UDP checksum only if checksum offload is used */
              if (csum_offload)
                {
                  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
                  b1->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
                }
            }
          else /* ipv6 */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              ip6_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof(*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip6_1+1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* IPv6 UDP checksum is mandatory */
              if (csum_offload)
                {
                  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip6_0 - b0->data;
                  vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
                  b1->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b1)->l3_hdr_offset = (u8 *) ip6_1 - b1->data;
                  vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
                }
              else
                {
                  udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                    (vm, b0, ip6_0, &bogus);
                  ASSERT(bogus == 0);
                  if (udp0->checksum == 0)
                    udp0->checksum = 0xffff;
                  udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                    (vm, b1, ip6_1, &bogus);
                  ASSERT(bogus == 0);
                  if (udp1->checksum == 0)
                    udp1->checksum = 0xffff;
                }
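              /* A computed checksum of 0 is sent as 0xffff (its one's
                 complement equivalent): a zero UDP checksum field means
                 "no checksum", which RFC 2460 section 8.1 forbids over
                 IPv6. */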
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }
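          /* The running batch is flushed only when the TX interface
             changes; combined counters are kept per thread (hence
             thread_index), so these increments should need no atomic
             operations. */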

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ip4_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              if (csum_offload)
                {
                  ip4_0->length =
                    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
                  b0->flags |=
                    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4;
                  vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip4_0 - b0->data;
                }
              else
                {
                  sum0 = ip4_0->checksum;
                  new_l0 = /* old_l0 always 0, see the rewrite setup */
                    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
                  sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                         length /* changed member */);
                  ip4_0->checksum = ip_csum_fold (sum0);
                  ip4_0->length = new_l0;
                }

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* UDP checksum only if checksum offload is used */
              if (csum_offload)
                {
                  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
                }
            }

          else /* ip6 path */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              if (csum_offload)
                {
                  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip6_0 - b0->data;
                  vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
                }
              else
                {
                  udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                    (vm, b0, ip6_0, &bogus);
                  ASSERT(bogus == 0);
                  if (udp0->checksum == 0)
                    udp0->checksum = 0xffff;
                }
            }

          pkts_encapsulated ++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
vxlan4_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Disable checksum offload as setup overhead in tx node is not worthwhile
     for ip4 header checksum only, unless udp checksum is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                             /* csum_offload */ 0);
}

static uword
vxlan6_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as udp checksum is mandatory. */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                             /* csum_offload */ 1);
}
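
/* Both wrappers call the always_inline worker with compile-time constant
   is_ip4/csum_offload arguments, so the compiler emits two specialized
   encap paths with those per-packet branches folded away. */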

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)
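
/* The VLIB_NODE_FUNCTION_MULTIARCH invocations above register CPU-variant
   builds of the node functions (selected at runtime by CPU feature) when
   VPP is built with multiarch support. */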