blob: 239009d680b1edb0f10fd0d30e2850827875ca8a [file] [log] [blame]
Mohsin Kazmif382b062020-08-11 15:00:44 +02001/*
2 * Copyright (c) 2020 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef included_gro_func_h
17#define included_gro_func_h
18
19#include <vnet/ethernet/ethernet.h>
20#include <vnet/gso/gro.h>
21#include <vnet/gso/hdr_offset_parser.h>
Florin Corasb040f982020-10-20 14:59:43 -070022#include <vnet/ip/ip4.h>
23#include <vnet/ip/ip6.h>
Mohsin Kazmif382b062020-08-11 15:00:44 +020024#include <vnet/udp/udp_packet.h>
Florin Coras97f96942020-10-20 13:45:51 -070025#include <vnet/tcp/tcp_packet.h>
Mohsin Kazmif382b062020-08-11 15:00:44 +020026#include <vnet/vnet.h>
27
28static_always_inline u8
29gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
30{
31 if (((b->current_length - l234_sz) <= 0) || ((flags &= ~TCP_FLAG_ACK) != 0))
32 return 1;
33 return 0;
34}
35
36static_always_inline void
37gro_get_ip4_flow_from_packet (u32 * sw_if_index,
38 ip4_header_t * ip4, tcp_header_t * tcp,
39 gro_flow_key_t * flow_key, int is_l2)
40{
41 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
42 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
43 ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
44 ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
45 flow_key->src_port = tcp->src_port;
46 flow_key->dst_port = tcp->dst_port;
47}
48
49static_always_inline void
50gro_get_ip6_flow_from_packet (u32 * sw_if_index,
51 ip6_header_t * ip6, tcp_header_t * tcp,
52 gro_flow_key_t * flow_key, int is_l2)
53{
54 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
55 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
56 ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
57 ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
58 flow_key->src_port = tcp->src_port;
59 flow_key->dst_port = tcp->dst_port;
60}
61
62static_always_inline u32
63gro_is_ip4_or_ip6_packet (vlib_buffer_t * b0, int is_l2)
64{
65 if (b0->flags & VNET_BUFFER_F_IS_IP4)
66 return VNET_BUFFER_F_IS_IP4;
67 if (b0->flags & VNET_BUFFER_F_IS_IP6)
68 return VNET_BUFFER_F_IS_IP6;
69 if (is_l2)
70 {
71 ethernet_header_t *eh =
72 (ethernet_header_t *) vlib_buffer_get_current (b0);
73 u16 ethertype = clib_net_to_host_u16 (eh->type);
74
75 if (ethernet_frame_is_tagged (ethertype))
76 {
77 ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
78
79 ethertype = clib_net_to_host_u16 (vlan->type);
80 if (ethertype == ETHERNET_TYPE_VLAN)
81 {
82 vlan++;
83 ethertype = clib_net_to_host_u16 (vlan->type);
84 }
85 }
86 if (ethertype == ETHERNET_TYPE_IP4)
87 return VNET_BUFFER_F_IS_IP4;
88 if (ethertype == ETHERNET_TYPE_IP6)
89 return VNET_BUFFER_F_IS_IP6;
90 }
91 else
92 {
93 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
94 return VNET_BUFFER_F_IS_IP4;
95 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
96 return VNET_BUFFER_F_IS_IP6;
97 }
98
99 return 0;
100}
101
/** Decision taken for a candidate packet during GRO coalescing. */
typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,	/**< no action decided */
  GRO_PACKET_ACTION_ENQUEUE = 1,	/**< in-order continuation; append to flow */
  GRO_PACKET_ACTION_FLUSH = 2,	/**< out-of-order; flush stored packets */
} gro_packet_action_t;
108
109static_always_inline gro_packet_action_t
110gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
111 u32 payload_len0)
112{
113 u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
114 u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);
115
116 /* next packet, enqueue */
117 if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
118 return GRO_PACKET_ACTION_ENQUEUE;
119 /* flush all packets */
120 else
121 return GRO_PACKET_ACTION_FLUSH;
122}
123
124static_always_inline void
125gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
126 vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
127 u16 l234_sz1)
128{
129 vlib_buffer_t *pb = b0;
130
131 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
132 b0->total_length_not_including_first_buffer = 0;
133
134 while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
135 pb = vlib_get_buffer (vm, pb->next_buffer);
136
137 vlib_buffer_advance (b1, l234_sz1);
138 pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
139 pb->next_buffer = bi1;
140 b0->total_length_not_including_first_buffer += payload_len1;
141 b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
142}
143
144static_always_inline u32
Mohsin Kazmif8d421f2020-10-06 11:58:40 +0200145gro_validate_checksum (vlib_main_t * vm, vlib_buffer_t * b0,
146 generic_header_offset_t * gho0, int is_ip4)
147{
148 u32 flags = 0;
149
Mohsin Kazmi68095382021-02-10 11:26:24 +0100150 if (b0->flags & VNET_BUFFER_F_OFFLOAD)
Mohsin Kazmif8d421f2020-10-06 11:58:40 +0200151 return VNET_BUFFER_F_L4_CHECKSUM_CORRECT;
152 vlib_buffer_advance (b0, gho0->l3_hdr_offset);
153 if (is_ip4)
154 flags = ip4_tcp_udp_validate_checksum (vm, b0);
155 else
156 flags = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
157 vlib_buffer_advance (b0, -gho0->l3_hdr_offset);
158 return flags;
159}
160
/**
 * Parse a single buffer and extract everything GRO needs to classify it.
 *
 * Fills @a gho0 with l2/l3/l4 header offsets and @a flow_key0 with the
 * packet's flow tuple, after checking that the packet is an IPv4 or
 * IPv6 TCP segment, carries payload, has only the ACK flag set, and has
 * a correct (or offloaded) L4 checksum.
 *
 * @return total packet length in the buffer chain, or 0 when the packet
 *         is not eligible for GRO (caller must then pass it through).
 */
static_always_inline u32
gro_get_packet_data (vlib_main_t * vm, vlib_buffer_t * b0,
		     generic_header_offset_t * gho0,
		     gro_flow_key_t * flow_key0, int is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 flags = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  /* only TCP segments are coalesced */
  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  /* both l3 views alias the same offset; only one is valid per packet */
  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  /* no payload, or flags other than ACK: not eligible */
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 1);
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
				    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 0);
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
				    is_l2);
    }
  else
    return 0;

  /* exclude packets whose L4 checksum did not verify */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) == 0)
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  /* already at/over the GSO size limit: nothing more can be merged */
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}
226
/**
 * Opportunistically coalesce two adjacent buffers of the same TCP flow.
 *
 * Both packets must be TCP over the same IP version, belong to the same
 * flow (interfaces, addresses, ports), carry payload with only the ACK
 * flag set, and be consecutive in sequence space; the merged result
 * must stay below TCP_MAX_GSO_SZ. On success, b1's payload is chained
 * onto b0.
 *
 * @return b1's ack number (network byte order) when merged, 0 when the
 *         packets cannot be coalesced.
 *         NOTE(review): an ack_number that is literally 0 would be
 *         indistinguishable from the failure return — confirm callers
 *         tolerate this.
 */
static_always_inline u32
gro_coalesce_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		      vlib_buffer_t * b1, u32 bi1, int is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  /* only TCP segments are coalesced */
  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  /* ip4/ip6 views alias the same l3 offset; one per packet is valid */
  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  /* both packets must carry payload and only the ACK flag */
  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  /* both packets must be the same address family */
  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
				    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
				    is_l2);
    }
  else
    return 0;

  /* different flows must never be merged */
  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  /* merged packet must stay below the GSO size limit */
  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      return tcp1->ack_number;
    }

  return 0;
}
331
/**
 * Finalize a coalesced packet so it can re-enter the graph as GSO.
 *
 * Sets gso_size to the first buffer's payload size, rewrites the l3
 * length field to cover the whole buffer chain, stamps the most recent
 * ack number into the TCP header, and requests TCP (and, for IPv4, IP)
 * checksum offload.
 */
static_always_inline void
gro_fixup_header (vlib_main_t * vm, vlib_buffer_t * b0, u32 ack_number,
		  int is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );

  /* segment size = first-buffer bytes after the l2/l3/l4 headers */
  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      /* ip4 total length includes the IPv4 header itself */
      ip4->length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l3_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
      vnet_buffer_offload_flags_set (b0, (VNET_BUFFER_OFFLOAD_F_TCP_CKSUM |
					  VNET_BUFFER_OFFLOAD_F_IP_CKSUM));
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      /* ip6 payload length excludes the fixed IPv6 header, hence
       * l4_hdr_offset is subtracted here */
      ip6->payload_length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l4_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
      vnet_buffer_offload_flags_set (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
    }

  /* carry the latest cumulative ack seen across the merged segments */
  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp0->ack_number = ack_number;
  /* the merged buffer no longer corresponds to a single traced packet */
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}
376
377static_always_inline u32
378vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
379 u32 * to)
380{
381 if (flow_table->flow_table_size > 0)
382 {
383 gro_flow_t *gro_flow;
384 u32 i = 0, j = 0;
385 while (i < GRO_FLOW_TABLE_MAX_SIZE)
386 {
387 gro_flow = &flow_table->gro_flow[i];
388 if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
389 {
390 // flush the packet
391 vlib_buffer_t *b0 =
392 vlib_get_buffer (vm, gro_flow->buffer_index);
393 gro_fixup_header (vm, b0, gro_flow->last_ack_number,
394 flow_table->is_l2);
395 to[j] = gro_flow->buffer_index;
396 gro_flow_table_reset_flow (flow_table, gro_flow);
397 flow_table->n_vectors++;
398 j++;
399 }
400 i++;
401 }
402
403 return j;
404 }
405 return 0;
406}
407
408static_always_inline void
409vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
410 gro_flow_table_t *
411 flow_table)
412{
413 if (gro_flow_table_is_timeout (vm, flow_table))
414 {
415 u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
416 u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);
417
418 if (n_to > 0)
419 {
420 u32 node_index = flow_table->node_index;
421 vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
422 u32 *f_to = vlib_frame_vector_args (f);
423 u32 i = 0;
424
425 while (i < n_to)
426 {
427 f_to[f->n_vectors] = to[i];
428 i++;
429 f->n_vectors++;
430 }
431 vlib_put_frame_to_node (vm, node_index, f);
432 }
433 gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
434 }
435}
436
/**
 * Flow-table based GRO: process one buffer index.
 *
 * Depending on the packet and the state of its flow, the buffer is
 * either stored/coalesced in the flow table (returns 0), passed
 * through unchanged, or returned together with a flushed stored
 * packet.
 *
 * @param to  output array for buffer indices handed back to the caller
 * @return number of buffer indices written to @a to (0, 1 or 2)
 */
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
			    u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  int is_l2 = flow_table->is_l2;

  /* GRO disabled: pass the packet straight through */
  if (!gro_flow_table_is_enable (flow_table))
    {
      to[0] = bi0;
      return 1;
    }

  /* already a GSO packet: nothing to coalesce */
  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      to[0] = bi0;
      return 1;
    }

  /* not an eligible TCP/ACK-only packet: pass through */
  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      to[0] = bi0;
      return 1;
    }

  /* no free flow slot available: pass through */
  gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  if (!gro_flow)
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      /* first packet of the flow: store it and arm the flow timeout */
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      /* flow already holds a stored packet (suffix _s): try to append */
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
	(tcp_header_t *) (vlib_buffer_get_current (b_s) +
			  gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
	gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
	{
	  if (PREDICT_TRUE ((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ))
	    {
	      /* in-order and under the size limit: merge and keep */
	      flow_table->total_vectors++;
	      gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      return 0;
	    }
	  else
	    {
	      // flush the stored GSO size packet and buffer the current packet
	      flow_table->n_vectors++;
	      flow_table->total_vectors++;
	      gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	      gro_flow->n_buffers = 0;
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
	      to[0] = bi_s;
	      return 1;
	    }
	}
      else
	{
	  // flush the all (current and stored) packets
	  flow_table->n_vectors++;
	  flow_table->total_vectors++;
	  gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	  gro_flow->n_buffers = 0;
	  gro_flow_table_reset_flow (flow_table, gro_flow);
	  to[0] = bi_s;
	  to[1] = bi0;
	  return 2;
	}
    }
}
553
554/**
555 * coalesce buffers with flow tables
556 */
557static_always_inline u32
558vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
559 u16 n_left_from, u32 * to)
560{
561 u16 count = 0, i = 0;
562
563 for (i = 0; i < n_left_from; i++)
564 count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);
565
566 return count;
567}
568
/**
 * Coalesce buffers in an opportunistic way, without flow tables.
 *
 * Starting from b[0], consecutive buffers of the same flow are merged
 * into it as long as gro_coalesce_buffers succeeds; the first
 * non-mergeable (or GSO-flagged) buffer stops the scan. When at least
 * one merge happened, b[0]'s headers are fixed up as a GSO packet.
 *
 * @return index of the first buffer NOT merged into b[0] — i.e. the
 *         number of leading input buffers now represented by b[0]
 *         (always >= 1). Presumably the caller forwards b[0] plus the
 *         remaining buffers from this index on — verify against callers.
 */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
			int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      while (n_left_from > 1)
	{
	  /* a GSO-flagged candidate ends the merge run */
	  if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
	    {
	      u32 ret;
	      /* non-zero return is the merged packet's latest ack */
	      if ((ret =
		   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
					 is_l2)) != 0)
		{
		  n_left_from -= 1;
		  bi += 1;
		  ack_number = ret;
		  continue;
		}
	      else
		break;
	    }
	  else
	    break;
	}

      /* at least one packet was merged into b[0]: finalize headers */
      if (bi >= 2)
	{
	  gro_fixup_header (vm, b[0], ack_number, is_l2);
	}
    }
  return bi;
}
609#endif /* included_gro_func_h */
610
611/*
612 * fd.io coding-style-patch-verification: ON
613 *
614 * Local Variables:
615 * eval: (c-set-style "gnu")
616 * End:
617 */