/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
  GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;

#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
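
/*
 * Note: each _(n) expands to one 8-byte copy in the encap loops below.
 * 4 x 8 = 32 octets covers all but the last 4 octets of the option-less
 * IP4/UDP/GENEVE rewrite (36 octets), and 7 x 8 = 56 octets covers the
 * option-less IP6/UDP/GENEVE rewrite exactly.
 */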
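/*
 * Shared worker for the geneve4-encap and geneve6-encap nodes: prepend the
 * per-tunnel precomputed rewrite (outer IP, UDP and GENEVE headers), fix up
 * the length and checksum fields, and batch TX counters per tunnel interface.
 */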
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
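          /* flow_hash0/1 are computed from the inner (pre-rewrite) headers;
             they are reused below as the outer UDP source ports and saved in
             the buffer metadata so the load-balance/ECMP path sees per-flow
             entropy. */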


          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
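          /* The tunnel lookup above is cached: sw_if_index0 and t0 persist
             across iterations, so consecutive packets for the same tunnel
             interface skip the hw-interface lookup. */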

          ALWAYS_ASSERT (t0 != NULL);

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }

          ALWAYS_ASSERT (t1 != NULL);

          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b[1], -(word) _vec_len (t1->rewrite));
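          /* Advancing by a negative amount moves current_data back into the
             buffer headroom, so the precomputed rewrite lands immediately in
             front of the inner packet. */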

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b[0]);
              ip4_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else                  /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b[0]);
              ip6_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1])
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* IPv6 UDP checksum is mandatory */
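              /* A computed checksum of 0 must be transmitted as all ones,
                 hence the 0 -> 0xffff fixup below; 0 on the wire would mean
                 "no checksum", which IPv6 does not allow for UDP. */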
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[1],
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
          vnet_buffer (b[1])->ip.flow_hash = flow_hash1;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }
          b += 2;

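          /* The buffers were enqueued speculatively to the cached next_index
             via to_next[]; validate next0/next1 and fix the frame up if
             either differs. */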
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

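      /* Single-buffer path for whatever the dual-buffer loop above left
         over. */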
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ALWAYS_ASSERT (t0 != NULL);

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b[0]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else                  /* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b[0]);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

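/*
 * IPv4 and IPv6 flavours of the encap node; both share geneve_encap_inline
 * and differ only in the is_ip4 flag.
 */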
VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
        [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
        [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */