/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

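/* X-macro: expanding foreach_vxlan_encap_error with the two different
 * definitions of _() below generates the string table and the matching
 * error enum, keeping the two in sync. */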
static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  vlib_combined_counter_main_t * tx_counter =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF(ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF(ip4_vxlan_header_t, 36);
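  /* Fixed outer-header sizes the rewrite arithmetic below depends on:
   * ip4: 20 (ip4) + 8 (udp) + 8 (vxlan) = 36 bytes
   * ip6: 40 (ip6) + 8 (udp) + 8 (vxlan) = 56 bytes */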

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof(ip4_vxlan_header_t) : sizeof(ip6_vxlan_header_t);
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
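  /* The tunnel's precomputed rewrite is right-justified within the
   * fixed-size rewrite_data area, so the valid header starts
   * rw_hdr_offset bytes in; the copies below read from that offset. */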
  u16 const l3_len = is_ip4 ? sizeof(ip4_header_t) : sizeof(ip6_header_t);
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
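            /* The prefetch starts one cache line before the packet data:
             * the encap rewrite below is written into the buffer headroom
             * just ahead of the current data pointer. */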
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
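          /* Flow hashes are computed over the inner frame, before the
           * buffer is advanced to make room for the encap header. */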

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                  vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer(b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                      vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }
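          /* The tunnel state (sw_if_index0/1, t0/t1, next0/1, dpoi_idx0/1)
           * is cached across loop iterations: consecutive packets usually
           * leave via the same tunnel interface, so the hw-interface and
           * tunnel-pool lookups are skipped when the TX sw_if_index
           * repeats. */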

          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
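          /* Stash the DPO index in the buffer metadata; the next node
           * (selected by dpoi_next_node) uses it to find the adjacency
           * or load-balance for the now-encapsulated packet. */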

          ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT(t1->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void * underlay0 = vlib_buffer_get_current(b0);
          void * underlay1 = vlib_buffer_get_current(b1);

          /* vnet_rewrite_two_header writes only in 8-byte (uword) chunks
           * and would discard the first 4 bytes of the 36-byte ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);
          clib_memcpy(underlay1, t1->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);

          ip4_header_t * ip4_0, * ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u8 * l3_0, * l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr0 = underlay0;
              ip4_vxlan_header_t * hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }
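              /* Any QoS marking recorded on the buffer is copied into the
               * outer TOS byte so the underlay preserves the packet's
               * priority; the value is remembered for the checksum
               * fix-up below. */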

              l3_0 = (u8 *)ip4_0;
              l3_1 = (u8 *)ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t * hdr0 = underlay0;
              ip6_vxlan_header_t * hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *)ip6_0;
              l3_1 = (u8 *)ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;
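          /* Per RFC 7348, the outer UDP source port carries the inner flow
           * hash, giving underlay ECMP/RSS per-flow entropy. */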

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
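          /* With offload, the l3/l4 offsets plus the flags set above are
           * all a downstream node or the NIC needs to finish the
           * checksums; nothing more to do here. */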
          /* IPv4: the UDP checksum is optional and computed only via
           * checksum offload; just fix up the IP4 header checksum
           * incrementally for the fields changed above */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT(bogus == 0);
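              /* A checksum that computes to zero must be transmitted as
               * all-ones: zero means "no checksum", which IPv6 forbids
               * for UDP (RFC 2460 / RFC 6935). */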
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT(bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

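      /* Single-packet path: same logic as the dual-packet loop above,
       * used for the frame's leftovers. */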
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                  vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void * underlay0 = vlib_buffer_get_current(b0);

          /* vnet_rewrite_one_header writes only in 8-byte (uword) chunks
           * and would discard the first 4 bytes of the 36-byte ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          udp_header_t * udp0;
          ip4_header_t * ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t * ip6_0;
          u8 * l3_0;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8*)ip4_0;
              udp0 = &hdr->udp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_header_t * hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *)ip6_0;
              udp0 = &hdr->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4: the UDP checksum is optional and computed only via
           * checksum offload; just fix up the IP4 header checksum
           * incrementally for the fields changed above */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
              sw_if_index0, 1, len0);
          pkts_encapsulated ++;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Disable checksum offload as setup overhead in tx node is not worthwhile
     for ip4 header checksum only, unless udp checksum is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                             /* csum_offload */ 0);
}

static uword
vxlan6_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                             /* csum_offload */ 1);
}
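/* vxlan_encap_inline is always_inline and is_ip4/csum_offload are
 * compile-time constants in the two wrappers above, so each wrapper
 * compiles to a specialized loop with the version/offload branches
 * folded away. */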

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

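/* VLIB_NODE_FUNCTION_MULTIARCH generates per-CPU-feature variants of the
 * node function and selects the best match at runtime. */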
VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)