/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
  VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;
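
/* The only statically registered next node is the drop node; at runtime the
 * next index is taken from each tunnel's next_dpo (see
 * vxlan_gbp_encap_inline below). */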

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
} vxlan_gbp_encap_trace_t;

u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s = format (s, "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d",
              t->tunnel_index, t->vni, t->sclass);
  return s;
}

always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

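  /* ip4 underlay: 20 (ip4) + 8 (udp) + 8 (vxlan-gbp) = 36 bytes; ip6
   * underlay: 40 + 8 + 8 = 56 bytes.  The pre-built header is stored
   * right-aligned in the tunnel's fixed-size rewrite_data, so
   * rw_hdr_offset below is the offset of its first byte. */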
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
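  /* ip6 has no header checksum, so only the UDP checksum can be offloaded
   * on the ip6 path; ip4 additionally requests IP header checksum offload */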
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

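  /* Standard VPP dual/single loop: packets are processed two at a time
   * while at least four remain, so the prefetches issued at the top of the
   * dual loop stay one iteration ahead of the work. */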
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
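          /* Compute the flow hash on the inner packet now, before the
           * underlay header is pushed over it */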
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

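          /* The tunnel and its DPO are cached across packets; the lookup is
           * redone only when the TX sw_if_index changes */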
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to
                   * drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

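          /* Stash the DPO index in adj_index[VLIB_TX]; the DPO-selected
           * next node uses it to locate its per-DPO state (e.g. the
           * adjacency or load-balance object) */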
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

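          /* Advancing by -underlay_hdr_len made room for the encap header,
           * so len0/len1 below include it; subtracting l3_len yields
           * exactly the value needed for the UDP length field (and the ip6
           * payload_length) */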
          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          /* vnet_rewrite_two_headers writes only in 8-byte (uword) chunks
           * and would discard the first 4 bytes of the 36-byte ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy (underlay0, t0->rewrite_header.data + rw_hdr_offset,
                       underlay_hdr_len);
          clib_memcpy (underlay1, t1->rewrite_header.data + rw_hdr_offset,
                       underlay_hdr_len);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

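          /* Deriving the UDP source port from the inner flow hash gives
           * underlay ECMP/LAG hashing per-flow entropy, as recommended by
           * RFC 7348 */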
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          /* Set the GBP policy fields carried in the VXLAN-GBP header:
           * gpflags and the source class (source EPG) */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.src_epg);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4: the UDP checksum is filled in only when offloaded (it is
           * optional for IPv4); here just fix up the IP header checksum
           * incrementally */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
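              /* RFC 768: a computed checksum of zero is transmitted as all
               * ones, since zero means "no checksum" and an absent checksum
               * is illegal for UDP over IPv6 */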
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.src_epg;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          /* vnet_rewrite_one_header writes only in 8-byte (uword) chunks
           * and would discard the first 4 bytes of the 36-byte ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy (underlay0, t0->rewrite_header.data + rw_hdr_offset,
                       underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* Set the GBP policy fields carried in the VXLAN-GBP header:
           * gpflags and the source class (source EPG) */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4: the UDP checksum is filled in only when offloaded (it is
           * optional for IPv4); here just fix up the IP header checksum
           * incrementally */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
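              /* RFC 768: a computed checksum of zero is transmitted as all
               * ones, since zero means "no checksum" and an absent checksum
               * is illegal for UDP over IPv6 */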
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_gbp_encap (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the tx node is not
   * worthwhile for the ip4 header checksum alone, unless the udp checksum
   * is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

static uword
vxlan6_gbp_encap (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as the udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

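/* These nodes act as the tx path of the vxlan-gbp tunnel interfaces:
 * packets switched or routed out of a tunnel's sw_if_index reach them via
 * the interface output path, get the underlay header pushed, and are then
 * handed to whatever next node the tunnel's next_dpo selects in the
 * underlay FIB. */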
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .function = vxlan4_gbp_encap,
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_encap_node, vxlan4_gbp_encap)

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .function = vxlan6_gbp_encap,
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_encap_node, vxlan6_gbp_encap)
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */