/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface_output.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum
{
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t *t = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}
#endif

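/*
 * Shared encap worker for the IPv4 and IPv6 nodes.  For each buffer it
 * prepends the tunnel's precomputed IP/UDP/VXLAN rewrite, patches the
 * length, TOS and checksum fields (or requests checksum offload), and
 * hands the packet to the tunnel's next DPO.  The main loop processes
 * two packets per iteration, with a single-packet loop for the remainder.
 */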
always_inline uword
vxlan_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);

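  /* Underlay header = outer IP + UDP + VXLAN: 20 + 8 + 8 = 36 bytes for
     IPv4 and 40 + 8 + 8 = 56 bytes for IPv6, matching the asserts above. */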
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const outer_packet_csum_offload_flags =
    is_ip4 ? (VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM |
              VNET_BUFFER_OFFLOAD_F_TNL_VXLAN) :
             (VNET_BUFFER_OFFLOAD_F_OUTER_UDP_CKSUM |
              VNET_BUFFER_OFFLOAD_F_TNL_VXLAN);

  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = b[0];
          vlib_buffer_t *b1 = b[1];
          b += 2;

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

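          /* Tunnel state (t0/t1, next index, DPO index) is cached across
             packets and only refreshed when the TX sw_if_index changes. */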
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr0 = underlay0;
              ip4_vxlan_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else			/* ipv6 */
            {
              ip6_vxlan_header_t *hdr0 = underlay0;
              ip6_vxlan_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }

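          /* The inner packet's flow hash doubles as the outer UDP source
             port, giving underlay ECMP/LAG hashing per-flow entropy. */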
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

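          /* If inner checksum offload is already pending on the buffer,
             record the outer header offsets and request outer checksum
             offload too; otherwise the outer checksums are finished in
             software below. */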
          if (b0->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
              vnet_buffer_offload_flags_set (b0,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum =
                ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          if (b1->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b1)->outer_l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer2 (b1)->outer_l4_hdr_offset = (u8 *) udp1 - b1->data;
              vnet_buffer_offload_flags_set (b1,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
          vnet_buffer (b1)->ip.flow_hash = flow_hash1;

          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = b[0];
          b += 1;

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
            }
          else			/* ip6 path */
            {
              ip6_vxlan_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          if (b0->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
              vnet_buffer_offload_flags_set (b0,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          /* reuse inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  /* Do not request checksum offload here: the setup overhead in the tx
     node is not worthwhile for the IPv4 header checksum alone, unless the
     UDP checksum is also required. */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the UDP checksum is mandatory. */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

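/*
 * vxlan4-encap and vxlan6-encap are the encapsulation graph nodes for
 * VXLAN tunnel interfaces: traffic forwarded out a vxlan_tunnel interface
 * (created, for example, with "create vxlan tunnel src ... dst ... vni ..."
 * in the stock VXLAN CLI) is rewritten here and handed to the tunnel's
 * underlay DPO.
 */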
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */