/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface_output.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
  VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

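/* The only static next node is the drop path; the actual next node is
   resolved at runtime from each tunnel's next_dpo (see below). */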
typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
            t->tunnel_index, t->vni, t->sclass,
            format_vxlan_gbp_header_gpflags, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
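  /* Offload flags stamped on the buffer when csum_offload is enabled; the
     IPv4 variant additionally requests an outer IP header checksum. */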
  u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID | VNET_BUFFER_F_L4_HDR_OFFSET_VALID :
    VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID | VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
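  /* Checksum-offload requests on the inner packet that must be resolved in
     software before encapsulation; the then-stale inner flags are cleared
     once handled. */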
  u32 const inner_packet_csum_offload_flags =
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 const inner_packet_removed_flags =
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_IS_IP6 |
    VNET_BUFFER_F_L2_HDR_OFFSET_VALID | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L4_HDR_OFFSET_VALID;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

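          /* Resolve any requested inner-packet checksum offload in software
             first; the buffer offload flags are reused below for the outer
             headers. */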
          u32 or_flags = b[0]->flags | b[1]->flags;
          if (csum_offload && (or_flags & inner_packet_csum_offload_flags))
            {
              /* Only calculate the non-GSO packet csum offload */
              if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
                {
                  vnet_calc_checksums_inline (vm, b[0],
                                              b[0]->flags &
                                              VNET_BUFFER_F_IS_IP4,
                                              b[0]->flags &
                                              VNET_BUFFER_F_IS_IP6);
                  b[0]->flags &= ~inner_packet_removed_flags;
                }
              if ((b[1]->flags & VNET_BUFFER_F_GSO) == 0)
                {
                  vnet_calc_checksums_inline (vm, b[1],
                                              b[1]->flags &
                                              VNET_BUFFER_F_IS_IP4,
                                              b[1]->flags &
                                              VNET_BUFFER_F_IS_IP6);
                  b[1]->flags &= ~inner_packet_removed_flags;
                }
            }

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b[1]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpoi_idx1;

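          /* Prepend the tunnel's precomputed IP/UDP/VXLAN-GBP underlay
             header, then advance the buffer so it becomes the new start of
             packet. */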
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b[0]),
                                    vlib_buffer_get_current (b[1]),
                                    underlay_hdr_len);

          vlib_buffer_advance (b[0], -underlay_hdr_len);
          vlib_buffer_advance (b[1], -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
          u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
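          /* UDP payload length: everything after the outer L3 header, i.e.
             UDP header + VXLAN-GBP header + inner packet. */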
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b[0]);
          void *underlay1 = vlib_buffer_get_current (b[1]);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

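              /* Propagate any QoS marking on the buffer into the outer IPv4
                 ToS byte. */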
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b[1]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b[1])->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
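          /* The source port carries the inner flow hash, giving the underlay
             per-flow entropy for ECMP load balancing. */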
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b[1])->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[1])->gbp.sclass);

          if (csum_offload)
            {
              b[0]->flags |= csum_flags;
              vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
              vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
              b[1]->flags |= csum_flags;
              vnet_buffer (b[1])->l3_hdr_offset = l3_1 - b[1]->data;
              vnet_buffer (b[1])->l4_hdr_offset = (u8 *) udp1 - b[1]->data;
            }
          /* For IPv4, the UDP checksum is computed only when offload is
             used; otherwise just update the IP header checksum
             incrementally */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

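              /* A checksum that computes to zero must be transmitted as
                 0xffff: zero means "no checksum" for UDP, which IPv6
                 forbids. */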
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[1], ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
          vnet_buffer (b[1])->ip.flow_hash = flow_hash1;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b[1])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[1])->gbp.flags;
            }
          b += 2;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

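      /* Process the remainder of the frame one buffer at a time. */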
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          if (csum_offload && (b[0]->flags & inner_packet_csum_offload_flags))
            {
              /* Only calculate the non-GSO packet csum offload */
              if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
                {
                  vnet_calc_checksums_inline (vm, b[0],
                                              b[0]->flags &
                                              VNET_BUFFER_F_IS_IP4,
                                              b[0]->flags &
                                              VNET_BUFFER_F_IS_IP6);
                  b[0]->flags &= ~inner_packet_removed_flags;
                }
            }

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b[0]),
                                   underlay_hdr_len);

          vlib_buffer_advance (b[0], -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b[0]);

          u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);

          if (csum_offload)
            {
              b[0]->flags |= csum_flags;
              vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
              vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
            }
          /* For IPv4, the UDP checksum is computed only when offload is
             used; otherwise just update the IP header checksum
             incrementally */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the tx node is not
     worthwhile for the IPv4 header checksum alone, unless the UDP checksum
     is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the UDP checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */