/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
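/* X-macro: the list above is expanded once with _() producing strings
   to build the error-strings table, and once producing identifiers to
   build the matching enum, keeping the two in sync. */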

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
  GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;

#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
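/* These offset lists drive the u64-at-a-time rewrite copies below:
   4 x 8 = 32 of the 36 IP4 rewrite octets (20 IP4 + 8 UDP + 8 GENEVE;
   the last 4 are copied as a u32), and 7 x 8 = 56 IP6 rewrite octets
   (40 IP6 + 8 UDP + 8 GENEVE), assuming no GENEVE options. */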
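/* Shared encap worker for the IPv4 and IPv6 GENEVE nodes.  Buffers
   arrive with sw_if_index[VLIB_TX] set to a GENEVE tunnel interface;
   the tunnel's precomputed rewrite (outer IP + UDP + GENEVE header) is
   prepended and the packet is handed to the tunnel's next DPO. */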
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);
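  /* Translate the frame's buffer indices into buffer pointers once,
     so both loops below can use b[] directly. */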

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
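          /* The block above prefetches the buffer headers and the first
             data cache lines of the two buffers following this pair to
             hide memory latency; b[2] and b[3] are valid because this
             loop only runs while n_left_from >= 4. */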

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
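          /* The L2 flow hash is written into the outer UDP source port
             below (low 16 bits only), giving the underlay per-flow
             entropy for ECMP/RSS load balancing. */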
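          /* Tunnel state is cached across iterations: packets for the
             same tunnel usually arrive back-to-back, so the hw-interface
             and tunnel lookups are redone only when the TX sw_if_index
             changes. */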
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ASSERT (t0 != NULL);

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }

          ASSERT (t1 != NULL);

          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b[1], -(word) _vec_len (t1->rewrite));
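          /* Advancing by a negative offset exposes headroom in front of
             the current data; the precomputed rewrite is copied into
             that space below to become the new outer headers. */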

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b[0]);
              ip4_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

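              /* The rewrite was built with an IP length of 0 (old_l0),
                 so the checksum is fixed with an incremental update
                 (RFC 1624 style) for the changed length field instead
                 of a full header re-checksum. */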
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b[0]);
              ip6_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1])
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

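              /* Unlike the IPv4 path there is no IP-level checksum here,
                 but a zero UDP checksum is forbidden over IPv6 (RFC 8200);
                 a computed value of 0 is therefore sent as 0xffff. */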
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[1],
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }
          b += 2;

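          /* Validate the speculative enqueue: both packets were written
             to the frame for next_index, and this helper moves either
             one to the correct next-node frame if next0 or next1
             disagrees. */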
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

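      /* Single-packet tail loop: same encap logic as the dual loop
         above, used for the remainder when fewer than four packets (or
         two frame slots) are left. */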
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b[0]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b[0]);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
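      /* Persist the last interface in node runtime data so the stats
         batch can resume in the next frame without an immediate flush. */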
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */