/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

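/*
 * Per-packet trace record: captures the GSO-related buffer metadata so
 * packet traces can show whether a buffer was marked for segmentation.
 */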
typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d",
                  t->gso_size, t->gso_l4_hdr_sz);
    }
  else
    {
      s = format (s, "non-gso buffer");
    }

  return s;
}

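/*
 * Allocate the buffers that one GSO buffer will be segmented into. The
 * per-segment payload size is capped so that the L2/L3/L4 headers plus
 * payload always fit within the default buffer data size.
 */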
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, gso_header_offset_t * gho)
{
  u16 size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /* rounded-up division */
  u16 n_bufs = (n_bytes_b0 - l234_sz + (size - 1)) / size;
  u16 n_alloc;

  ASSERT (n_bufs > 0);
  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

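/*
 * Copy the headers and first-buffer metadata of the original buffer into
 * a freshly allocated segment buffer.
 */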
static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  nb0->current_data = b0->current_data;
  nb0->total_length_not_including_first_buffer = 0;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->trace_handle = b0->trace_handle;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
  nb0->current_length = length;
}

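/*
 * Initialize a segment buffer from the template headers, compute how much
 * payload it can hold, and stamp the running TCP sequence number into its
 * TCP header.
 */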
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            gso_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

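/*
 * Finalize a segment: set the requested TCP flags and rewrite the IP
 * length field (IPv4 total length or IPv6 payload length) to match the
 * segment's actual length.
 */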
static_always_inline void
tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6,
                         gso_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    ip6->payload_length =
      clib_host_to_net_u16 (b0->current_length -
                            (gho->l4_hdr_offset - gho->l2_hdr_offset));
  else
    ip4->length =
      clib_host_to_net_u16 (b0->current_length -
                            (gho->l3_hdr_offset - gho->l2_hdr_offset));
}

/**
 * Allocate the necessary number of ptd->split_buffers, and segment the
 * possibly chained buffer(s) from b0 into them.
 *
 * Return the cumulative number of bytes sent, or zero if allocation
 * failed.
 */
static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0, gso_header_offset_t * gho,
                    u32 n_bytes_b0, int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  int l4_hdr_sz = gho->l4_hdr_sz;
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->l4_hdr_offset + l4_hdr_sz - gho->l2_hdr_offset;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, gho)))
    return 0;

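  /* the first segment reuses split_buffers[0]: original headers plus the
   * first chunk of payload (up to gso_size bytes) */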
  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6, gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

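      /* copy payload from the (possibly chained) source buffers into
       * fixed-size destination segments, advancing the source and
       * destination cursors independently */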
      /* an arbitrarily large number to catch runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (cdb0, tcp_flags_no_fin_psh, is_ip6,
                                       gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

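      /* the last segment carries the original TCP flags (FIN/PSH) */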
      tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

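/*
 * Bump the interface TX error counter and hand the buffer to error-drop
 * with the given error code.
 */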
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

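/*
 * Main worker: when do_segmentation is 0, buffers are passed through on a
 * four-wide fast path; any buffer that needs software segmentation (or is
 * headed to a different TX interface than the first buffer's) falls
 * through to the single-buffer path, which segments GSO buffers and
 * enqueues the resulting segments.
 */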
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

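            /* a GSO buffer headed for a different interface that lacks
             * GSO support must be segmented in software: bail out of the
             * fast path and let the single-buffer loop handle it */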
            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if ((hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if ((hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

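      /* single-buffer path: decide per buffer whether to segment */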
      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there are no buffers to segment into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  gso_header_offset_t gho;
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;

                  gho = vnet_gso_header_offset_parser (b[0], is_ip6);
                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

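                  /* enqueue all segments, opening a new frame whenever the
                   * current one fills up */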
                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      if (n_tx_bufs >= n_left_to_next)
                        {
                          while (n_left_to_next > 0)
                            {
                              sbi0 = to_next[0] = from_seg[0];
                              sb0 = vlib_get_buffer (vm, sbi0);
                              to_next += 1;
                              from_seg += 1;
                              n_left_to_next -= 1;
                              n_tx_bufs -= 1;
                              vnet_feature_next (&next0, sb0);
                              vlib_validate_buffer_enqueue_x1 (vm, node,
                                                               next_index,
                                                               to_next,
                                                               n_left_to_next,
                                                               sbi0, next0);
                            }
                          vlib_put_next_frame (vm, node, next_index,
                                               n_left_to_next);
                          vlib_get_new_next_frame (vm, node, next_index,
                                                   to_next, n_left_to_next);
                        }
                      while (n_tx_bufs > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now-segmented original buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

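/*
 * Entry point shared by the four GSO nodes: the GSO capability of the
 * first buffer's TX interface selects either the pass-through or the
 * segmenting variant of the inline worker.
 */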
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_ip6, /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_ip6, /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_after = VNET_FEATURES ("ipsec4-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_after = VNET_FEATURES ("ipsec6-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */