/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d",
		  t->gso_size, t->gso_l4_hdr_sz);
    }

  return s;
}
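
/*
 * For illustration only (the values are an assumption, not taken from a
 * real capture): a traced GSO buffer for a 1448-byte-MSS flow with a
 * 20-byte TCP header would render as "gso_sz 1448 gso_l4_hdr_sz 20".
 */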

static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
		   vnet_interface_per_thread_data_t * ptd,
		   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
		   u16 gso_size)
{
  u16 size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz);

  /* rounded-up division */
  u16 n_bufs = (n_bytes_b0 - l234_sz + (size - 1)) / size;
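  /*
   * Worked example (illustrative numbers, assuming the default 2048-byte
   * buffer data size): for n_bytes_b0 = 9014, l234_sz = 54 and
   * gso_size = 1448, size = min (1448, 2048 - 54) = 1448 and
   * n_bufs = (9014 - 54 + 1447) / 1448 = 7 segment buffers.
   */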
  u16 n_alloc;

  ASSERT (n_bufs > 0);
  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
				 u32 flags, u16 length)
{
  nb0->current_data = b0->current_data;
  nb0->total_length_not_including_first_buffer = 0;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->trace_handle = b0->trace_handle;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
		    vlib_buffer_get_current (b0), length);
  nb0->current_length = length;
}

static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
			    vlib_buffer_t * b0, u16 template_data_sz,
			    u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
			    u32 next_tcp_seq, u32 flags)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
	      vlib_buffer_get_default_data_size (vm) - (template_data_sz +
							nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (nb0->data + vnet_buffer (nb0)->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

static_always_inline void
tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6)
{
  u16 l3_hdr_offset = vnet_buffer (b0)->l3_hdr_offset;
  u16 l4_hdr_offset = vnet_buffer (b0)->l4_hdr_offset;
  ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l3_hdr_offset);
  ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l3_hdr_offset);
  tcp_header_t *tcp = (tcp_header_t *) (b0->data + l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    ip6->payload_length =
      clib_host_to_net_u16 (b0->current_length -
			    (l4_hdr_offset - b0->current_data));
  else
    ip4->length =
      clib_host_to_net_u16 (b0->current_length -
			    (l3_hdr_offset - b0->current_data));
}
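
/*
 * Note the differing measurement points above: IPv4's total length field
 * counts from the start of the IP header (l3_hdr_offset), while IPv6's
 * payload_length excludes the fixed 40-byte IPv6 header and therefore
 * counts from l4_hdr_offset.
 */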

/**
 * Allocate the necessary number of ptd->split_buffers and segment the
 * possibly chained buffer(s) from b0 into them.
 *
 * Return the cumulative number of bytes sent, or zero if the buffer
 * allocation failed.
 */
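/*
 * Illustrative walk-through (the numbers are an assumption, not taken
 * from the code): a 9014-byte TCP/IPv4 chain with a 54-byte l2+l3+l4
 * header and gso_size 1448 is split into 7 segments - the first reuses
 * the original headers and carries 1448 payload bytes, the next five
 * carry 1448 bytes each, and the last carries the remaining 272 bytes
 * together with the FIN/PSH flags saved from the original TCP header.
 */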

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
		    u32 sbi0, vlib_buffer_t * sb0, u32 n_bytes_b0, int is_ip6)
{
  u32 n_tx_bytes = 0;
  ASSERT (sb0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  int l4_hdr_sz = vnet_buffer2 (sb0)->gso_l4_hdr_sz;
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (sb0->data + vnet_buffer (sb0)->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = vnet_buffer (sb0)->l4_hdr_offset + l4_hdr_sz
    - sb0->current_data;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
				   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;		/* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
				  &dst_left, next_tcp_seq, default_bflags);
      /* an arbitrarily large number to catch runaway loops */
      int nloops = 2000;
      while (total_src_left)
	{
	  if (nloops-- <= 0)
	    clib_panic ("infinite loop detected");
	  u16 bytes_to_copy = clib_min (src_left, dst_left);

	  clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

	  src_left -= bytes_to_copy;
	  src_ptr += bytes_to_copy;
	  total_src_left -= bytes_to_copy;
	  dst_left -= bytes_to_copy;
	  dst_ptr += bytes_to_copy;
	  next_tcp_seq += bytes_to_copy;
	  cdb0->current_length += bytes_to_copy;

	  if (0 == src_left)
	    {
	      int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
	      u32 next_bi = csb0->next_buffer;

	      /* init src to the next buffer in chain */
	      if (has_next)
		{
		  csbi0 = next_bi;
		  csb0 = vlib_get_buffer (vm, csbi0);
		  src_left = csb0->current_length;
		  src_ptr = vlib_buffer_get_current (csb0);
		}
	      else
		{
		  ASSERT (total_src_left == 0);
		  break;
		}
	    }
	  if (0 == dst_left && total_src_left)
	    {
	      n_tx_bytes += cdb0->current_length;
	      ASSERT (dbi < vec_len (ptd->split_buffers));
	      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
	      tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
					  gso_size, &dst_ptr, &dst_left,
					  next_tcp_seq, default_bflags);
	    }
	}

      tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}
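
/*
 * The caller owns ptd->split_buffers: vnet_gso_node_inline below enqueues
 * the segments, resets the vector length, and frees the original buffer
 * once segmentation succeeds.
 */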

static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
			   vlib_node_runtime_t * node, u32 * pbi0,
			   u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
		      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
			   /* buffer stride */ 1,
			   /* n_buffers */ 1,
			   VNET_INTERFACE_OUTPUT_NEXT_DROP,
			   node->node_index, drop_error_code);
}

static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
		      vlib_node_runtime_t * node,
		      vlib_frame_t * frame,
		      vnet_main_t * vnm,
		      vnet_hw_interface_t * hi,
		      int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (from + 8 <= from_end && n_left_to_next >= 4)
	{
	  u32 bi0, bi1, bi2, bi3;
	  u32 next0, next1, next2, next3;
	  u32 swif0, swif1, swif2, swif3;
	  gso_trace_t *t0, *t1, *t2, *t3;
	  vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

	  /* Prefetch next iteration. */
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);
	  vlib_prefetch_buffer_header (b[6], LOAD);
	  vlib_prefetch_buffer_header (b[7], LOAD);

	  bi0 = from[0];
	  bi1 = from[1];
	  bi2 = from[2];
	  bi3 = from[3];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  to_next[2] = bi2;
	  to_next[3] = bi3;

	  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	  swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
	  swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
	  swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

	  if (PREDICT_FALSE (hi->sw_if_index != swif0))
	    {
	      hi0 = vnet_get_sup_hw_interface (vnm, swif0);
	      if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		  (b[0]->flags & VNET_BUFFER_F_GSO))
		break;
	    }
	  if (PREDICT_FALSE (hi->sw_if_index != swif1))
	    {
	      hi1 = vnet_get_sup_hw_interface (vnm, swif1);
	      if (!(hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
		  (b[1]->flags & VNET_BUFFER_F_GSO))
		break;
	    }
	  if (PREDICT_FALSE (hi->sw_if_index != swif2))
	    {
	      hi2 = vnet_get_sup_hw_interface (vnm, swif2);
	      if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		  (b[2]->flags & VNET_BUFFER_F_GSO))
		break;
	    }
	  if (PREDICT_FALSE (hi->sw_if_index != swif3))
	    {
	      hi3 = vnet_get_sup_hw_interface (vnm, swif3);
	      if (!(hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
		  (b[3]->flags & VNET_BUFFER_F_GSO))
		break;
	    }

	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
	      t0->gso_size = vnet_buffer2 (b[0])->gso_size;
	      t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
	      t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
	      t1->gso_size = vnet_buffer2 (b[1])->gso_size;
	      t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
	      t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
	      t2->gso_size = vnet_buffer2 (b[2])->gso_size;
	      t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
	      t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
	      t3->gso_size = vnet_buffer2 (b[3])->gso_size;
	      t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
	    }

	  from += 4;
	  to_next += 4;
	  n_left_to_next -= 4;
	  n_left_from -= 4;

	  next0 = next1 = 0;
	  next2 = next3 = 0;
	  vnet_feature_next (&next0, b[0]);
	  vnet_feature_next (&next1, b[1]);
	  vnet_feature_next (&next2, b[2]);
	  vnet_feature_next (&next3, b[3]);
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	  b += 4;
	}

      while (from + 1 <= from_end && n_left_to_next > 0)
	{
	  u32 bi0, swif0;
	  gso_trace_t *t0;
	  vnet_hw_interface_t *hi0;
	  u32 next0 = 0;
	  /* keep the decision local to this buffer so it cannot leak
	     into the buffers that follow in the frame */
	  int do_segmentation0 = do_segmentation;

	  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	  if (PREDICT_FALSE (hi->sw_if_index != swif0))
	    {
	      hi0 = vnet_get_sup_hw_interface (vnm, swif0);
	      if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		  (b[0]->flags & VNET_BUFFER_F_GSO))
		do_segmentation0 = 1;
	    }

	  /* speculatively enqueue b0 to the current next frame */
	  to_next[0] = bi0 = from[0];
	  to_next += 1;
	  n_left_to_next -= 1;
	  from += 1;
	  n_left_from -= 1;

	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
	      t0->gso_size = vnet_buffer2 (b[0])->gso_size;
	      t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
	    }

	  if (do_segmentation0)
	    {
	      if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
		{
		  /*
		   * Undo the enqueue of b0 - it is not going anywhere,
		   * and will be freed either after it is segmented or
		   * when dropped, if there are no buffers to segment into.
		   */
		  to_next -= 1;
		  n_left_to_next += 1;
		  /* undo the counting. */
		  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
		  u32 n_tx_bytes = 0;

		  n_tx_bytes =
		    tso_segment_buffer (vm, ptd, bi0, b[0], n_bytes_b0,
					is_ip6);

		  if (PREDICT_FALSE (n_tx_bytes == 0))
		    {
		      drop_one_buffer_and_count (vm, vnm, node, from - 1,
						 hi->sw_if_index,
						 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
		      b += 1;
		      continue;
		    }

		  u16 n_tx_bufs = vec_len (ptd->split_buffers);
		  u32 *from_seg = ptd->split_buffers;

		  while (n_tx_bufs > 0)
		    {
		      u32 sbi0;
		      vlib_buffer_t *sb0;
		      if (n_tx_bufs >= n_left_to_next)
			{
			  while (n_left_to_next > 0)
			    {
			      sbi0 = to_next[0] = from_seg[0];
			      sb0 = vlib_get_buffer (vm, sbi0);
			      to_next += 1;
			      from_seg += 1;
			      n_left_to_next -= 1;
			      n_tx_bufs -= 1;
			      vnet_feature_next (&next0, sb0);
			      vlib_validate_buffer_enqueue_x1 (vm, node,
							       next_index,
							       to_next,
							       n_left_to_next,
							       sbi0, next0);
			    }
			  vlib_put_next_frame (vm, node, next_index,
					       n_left_to_next);
			  vlib_get_new_next_frame (vm, node, next_index,
						   to_next, n_left_to_next);
			}
		      while (n_tx_bufs > 0)
			{
			  sbi0 = to_next[0] = from_seg[0];
			  sb0 = vlib_get_buffer (vm, sbi0);
			  to_next += 1;
			  from_seg += 1;
			  n_left_to_next -= 1;
			  n_tx_bufs -= 1;
			  vnet_feature_next (&next0, sb0);
			  vlib_validate_buffer_enqueue_x1 (vm, node,
							   next_index,
							   to_next,
							   n_left_to_next,
							   sbi0, next0);
			}
		    }
		  /* The buffers were enqueued. Reset the length */
		  _vec_len (ptd->split_buffers) = 0;
		  /* Free the now segmented buffer */
		  vlib_buffer_free_one (vm, bi0);
		  b += 1;
		  continue;
		}
	    }

	  vnet_feature_next (&next0, b[0]);
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	  b += 1;
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
		 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
				      vnet_buffer (b)->sw_if_index[VLIB_TX]);
      /*
       * The two-armed "if" below is here because we want to err on the
       * side of not impacting the non-GSO performance - for the more
       * common case of no GSO interfaces, the constant do_segmentation
       * argument keeps the segmentation codepath out of the generated
       * node body altogether.
       */
      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_ip6, /* do_segmentation */ 0);
      else
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_ip6, /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_after = VNET_FEATURES ("ipsec4-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_after = VNET_FEATURES ("ipsec6-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};
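
/* *INDENT-ON* */

/*
 * These registrations place the GSO nodes on the l2-output and
 * ip4/ip6-output feature arcs; the IP variants run after the IPsec
 * output features and before interface-output, so segmentation is the
 * last software step on the output path.
 */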

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */