/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_gro_func_h
#define included_gro_func_h

#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/vnet.h>
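
/**
 * A packet is unsuitable for coalescing if it carries no TCP payload,
 * or if any TCP flag other than ACK is set (SYN, FIN, RST, PSH, etc.
 * must be delivered as-is).
 */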
static_always_inline u8
gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
{
  if (((b->current_length - l234_sz) <= 0) || ((flags & ~TCP_FLAG_ACK) != 0))
    return 1;
  return 0;
}
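
/**
 * Build the flow key (rx/tx sw_if_index, IP addresses and TCP ports)
 * used to look up a packet's flow in the GRO flow table. The IPv6
 * variant below does the same for IPv6 addresses.
 */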
static_always_inline void
gro_get_ip4_flow_from_packet (u32 * sw_if_index,
                              ip4_header_t * ip4, tcp_header_t * tcp,
                              gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
  ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

static_always_inline void
gro_get_ip6_flow_from_packet (u32 * sw_if_index,
                              ip6_header_t * ip6, tcp_header_t * tcp,
                              gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
  ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}
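
/**
 * Classify a buffer as IPv4 or IPv6. Buffer flags are trusted when
 * already set; otherwise the ethertype (skipping up to two VLAN tags)
 * is examined on L2 paths, or the IP version nibble on L3 paths.
 * Returns VNET_BUFFER_F_IS_IP4, VNET_BUFFER_F_IS_IP6 or 0.
 */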
static_always_inline u32
gro_is_ip4_or_ip6_packet (vlib_buffer_t * b0, int is_l2)
{
  if (b0->flags & VNET_BUFFER_F_IS_IP4)
    return VNET_BUFFER_F_IS_IP4;
  if (b0->flags & VNET_BUFFER_F_IS_IP6)
    return VNET_BUFFER_F_IS_IP6;
  if (is_l2)
    {
      ethernet_header_t *eh =
        (ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);

      if (ethernet_frame_is_tagged (ethertype))
        {
          ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

          ethertype = clib_net_to_host_u16 (vlan->type);
          if (ethertype == ETHERNET_TYPE_VLAN)
            {
              vlan++;
              ethertype = clib_net_to_host_u16 (vlan->type);
            }
        }
      if (ethertype == ETHERNET_TYPE_IP4)
        return VNET_BUFFER_F_IS_IP4;
      if (ethertype == ETHERNET_TYPE_IP6)
        return VNET_BUFFER_F_IS_IP6;
    }
  else
    {
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
        return VNET_BUFFER_F_IS_IP4;
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
        return VNET_BUFFER_F_IS_IP6;
    }

  return 0;
}
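
/**
 * Relationship of an incoming packet to the packet(s) already held
 * for its flow.
 */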
typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,
  GRO_PACKET_ACTION_ENQUEUE = 1,
  GRO_PACKET_ACTION_FLUSH = 2,
} gro_packet_action_t;
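
/**
 * Decide whether the second packet is the in-order continuation of
 * the first: ENQUEUE when seq0 + payload_len0 == seq1, FLUSH
 * otherwise.
 */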
static_always_inline gro_packet_action_t
gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
                        u32 payload_len0)
{
  u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
  u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);

  /* next packet, enqueue */
  if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
    return GRO_PACKET_ACTION_ENQUEUE;
  /* flush all packets */
  else
    return GRO_PACKET_ACTION_FLUSH;
}
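
/**
 * Chain b1's payload onto b0: advance b1 past its L2/L3/L4 headers,
 * link it at the tail of b0's buffer chain and account for the added
 * payload in b0's total length.
 */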
static_always_inline void
gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
                   vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
                   u16 l234_sz1)
{
  vlib_buffer_t *pb = b0;

  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    b0->total_length_not_including_first_buffer = 0;

  while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
    pb = vlib_get_buffer (vm, pb->next_buffer);

  vlib_buffer_advance (b1, l234_sz1);
  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
  pb->next_buffer = bi1;
  b0->total_length_not_including_first_buffer += payload_len1;
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}
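
/**
 * Parse a buffer's headers and extract its flow key. Returns the
 * packet length in the chain, or 0 if the packet cannot be coalesced
 * (non-IP, non-TCP, bad flags, or already at TCP_MAX_GSO_SZ).
 */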
static_always_inline u32
gro_get_packet_data (vlib_main_t * vm, vlib_buffer_t * b0,
                     generic_header_offset_t * gho0,
                     gro_flow_key_t * flow_key0, int is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
                                    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
                                    is_l2);
    }
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}
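
/**
 * Try to coalesce b1 into b0 when both parse as TCP packets of the
 * same flow and b1 continues b0's sequence. On success b1's payload
 * is chained onto b0 and b1's (network-order) ack number is returned;
 * callers treat a non-zero return as success. 0 means no coalescing.
 */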
static_always_inline u32
gro_coalesce_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
                      vlib_buffer_t * b1, u32 bi1, int is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
                                    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
                                    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
                                    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
                                    is_l2);
    }
  else
    return 0;

  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      return tcp1->ack_number;
    }

  return 0;
}
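
/**
 * Finalize a coalesced chain before handing it off: derive gso_size
 * from the first buffer's payload, rewrite the IPv4 total length or
 * IPv6 payload length for the whole chain, set the GSO and checksum
 * offload flags, and write the most recent ack number into the TCP
 * header.
 */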
static_always_inline void
gro_fixup_header (vlib_main_t * vm, vlib_buffer_t * b0, u32 ack_number,
                  int is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );

  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip4->length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                              gho0.l3_hdr_offset);
      b0->flags |=
        (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4 |
         VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip6->payload_length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                              gho0.l4_hdr_offset);
      b0->flags |=
        (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6 |
         VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
    }

  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp0->ack_number = ack_number;
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}
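
/**
 * Walk the flow table and flush every flow whose timeout has expired:
 * fix up its held packet, emit its buffer index into @a to and reset
 * the flow. Returns the number of buffer indices written.
 */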
static_always_inline u32
vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
                           u32 * to)
{
  if (flow_table->flow_table_size > 0)
    {
      gro_flow_t *gro_flow;
      u32 i = 0, j = 0;
      while (i < GRO_FLOW_TABLE_MAX_SIZE)
        {
          gro_flow = &flow_table->gro_flow[i];
          if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
            {
              // flush the packet
              vlib_buffer_t *b0 =
                vlib_get_buffer (vm, gro_flow->buffer_index);
              gro_fixup_header (vm, b0, gro_flow->last_ack_number,
                                flow_table->is_l2);
              to[j] = gro_flow->buffer_index;
              gro_flow_table_reset_flow (flow_table, gro_flow);
              flow_table->n_vectors++;
              j++;
            }
          i++;
        }

      return j;
    }
  return 0;
}
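
/**
 * Periodic path: when the flow table itself times out, flush expired
 * flows and hand the resulting buffers to the table's configured node
 * via a new frame, then re-arm the table timer.
 */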
static_always_inline void
vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
                                                 gro_flow_table_t *
                                                 flow_table)
{
  if (gro_flow_table_is_timeout (vm, flow_table))
    {
      u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
      u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);

      if (n_to > 0)
        {
          u32 node_index = flow_table->node_index;
          vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
          u32 *f_to = vlib_frame_vector_args (f);
          u32 i = 0;

          while (i < n_to)
            {
              f_to[f->n_vectors] = to[i];
              i++;
              f->n_vectors++;
            }
          vlib_put_frame_to_node (vm, node_index, f);
        }
      gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
    }
}
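
/**
 * Per-packet GRO entry point. Non-coalescable packets (GRO disabled,
 * already GSO, parse failure, no free flow) pass through to @a to.
 * The first packet of a flow is stored; a follow-on in-sequence packet
 * is merged, while an out-of-sequence packet or a would-be oversized
 * merge flushes the stored packet. Returns the number of buffer
 * indices written to @a to (0, 1 or 2).
 */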
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
                            u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  int is_l2 = flow_table->is_l2;

  if (!gro_flow_table_is_enable (flow_table))
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      to[0] = bi0;
      return 1;
    }

  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      to[0] = bi0;
      return 1;
    }

  gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  if (!gro_flow)
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      tcp0 =
        (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      tcp0 =
        (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
        vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
                                           1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
        vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
                                           0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
        (tcp_header_t *) (vlib_buffer_get_current (b_s) +
                          gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
        gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
        {
          if (PREDICT_TRUE ((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ))
            {
              flow_table->total_vectors++;
              gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
              gro_flow_store_packet (gro_flow, bi0);
              gro_flow->last_ack_number = tcp0->ack_number;
              return 0;
            }
          else
            {
              // flush the stored GSO-sized packet and buffer the current one
              flow_table->n_vectors++;
              flow_table->total_vectors++;
              gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
              gro_flow->n_buffers = 0;
              gro_flow_store_packet (gro_flow, bi0);
              gro_flow->last_ack_number = tcp0->ack_number;
              gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
              to[0] = bi_s;
              return 1;
            }
        }
      else
        {
          // flush all (current and stored) packets
          flow_table->n_vectors++;
          flow_table->total_vectors++;
          gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
          gro_flow->n_buffers = 0;
          gro_flow_table_reset_flow (flow_table, gro_flow);
          to[0] = bi_s;
          to[1] = bi0;
          return 2;
        }
    }
}

/**
 * Coalesce a vector of buffers through the flow table; returns the
 * number of buffer indices written to @a to.
 */
static_always_inline u32
vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
                 u16 n_left_from, u32 * to)
{
  u16 count = 0, i = 0;

  for (i = 0; i < n_left_from; i++)
    count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);

  return count;
}
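
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * an input node would collect the frame's buffer indices and run them
 * through the interface's flow table before enqueueing downstream.
 *
 *   u32 *from = vlib_frame_vector_args (frame);
 *   u32 to[VLIB_FRAME_SIZE];
 *   u32 n_to = vnet_gro_inline (vm, flow_table, from,
 *                               frame->n_vectors, to);
 *   // enqueue the n_to coalesced or passed-through buffers in `to'
 */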

/**
 * Coalesce buffers opportunistically, without flow tables: merge a
 * run of consecutive in-sequence packets of the same flow into the
 * first buffer.
 */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
                        int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      while (n_left_from > 1)
        {
          if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
            {
              u32 ret;
              if ((ret =
                   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
                                         is_l2)) != 0)
                {
                  n_left_from -= 1;
                  bi += 1;
                  ack_number = ret;
                  continue;
                }
              else
                break;
            }
          else
            break;
        }

      if (bi >= 2)
        {
          gro_fixup_header (vm, b[0], ack_number, is_l2);
        }
    }
  return bi;
}
#endif /* included_gro_func_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */