/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#ifndef included_gro_func_h
17#define included_gro_func_h
18
19#include <vnet/ethernet/ethernet.h>
20#include <vnet/gso/gro.h>
21#include <vnet/gso/hdr_offset_parser.h>
Florin Corasb040f982020-10-20 14:59:43 -070022#include <vnet/ip/ip4.h>
23#include <vnet/ip/ip6.h>
Mohsin Kazmif382b062020-08-11 15:00:44 +020024#include <vnet/udp/udp_packet.h>
Florin Coras97f96942020-10-20 13:45:51 -070025#include <vnet/tcp/tcp_packet.h>
Mohsin Kazmif382b062020-08-11 15:00:44 +020026#include <vnet/vnet.h>
27
28static_always_inline u8
29gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
30{
Mohsin Kazmi8758a942021-05-28 17:11:23 +020031 if (((b->current_length - l234_sz) <= 0) ||
32 ((flags &= ~(TCP_FLAG_ACK | TCP_FLAG_PSH)) != 0))
Mohsin Kazmif382b062020-08-11 15:00:44 +020033 return 1;
34 return 0;
35}
36
37static_always_inline void
38gro_get_ip4_flow_from_packet (u32 * sw_if_index,
39 ip4_header_t * ip4, tcp_header_t * tcp,
40 gro_flow_key_t * flow_key, int is_l2)
41{
42 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
43 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
44 ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
45 ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
46 flow_key->src_port = tcp->src_port;
47 flow_key->dst_port = tcp->dst_port;
48}
49
50static_always_inline void
51gro_get_ip6_flow_from_packet (u32 * sw_if_index,
52 ip6_header_t * ip6, tcp_header_t * tcp,
53 gro_flow_key_t * flow_key, int is_l2)
54{
55 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
56 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
57 ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
58 ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
59 flow_key->src_port = tcp->src_port;
60 flow_key->dst_port = tcp->dst_port;
61}
62
63static_always_inline u32
Mohsin Kazmi8758a942021-05-28 17:11:23 +020064gro_is_ip4_or_ip6_packet (vlib_buffer_t *b0, u8 is_l2)
Mohsin Kazmif382b062020-08-11 15:00:44 +020065{
66 if (b0->flags & VNET_BUFFER_F_IS_IP4)
67 return VNET_BUFFER_F_IS_IP4;
68 if (b0->flags & VNET_BUFFER_F_IS_IP6)
69 return VNET_BUFFER_F_IS_IP6;
70 if (is_l2)
71 {
72 ethernet_header_t *eh =
73 (ethernet_header_t *) vlib_buffer_get_current (b0);
74 u16 ethertype = clib_net_to_host_u16 (eh->type);
75
76 if (ethernet_frame_is_tagged (ethertype))
77 {
78 ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
79
80 ethertype = clib_net_to_host_u16 (vlan->type);
81 if (ethertype == ETHERNET_TYPE_VLAN)
82 {
83 vlan++;
84 ethertype = clib_net_to_host_u16 (vlan->type);
85 }
86 }
87 if (ethertype == ETHERNET_TYPE_IP4)
88 return VNET_BUFFER_F_IS_IP4;
89 if (ethertype == ETHERNET_TYPE_IP6)
90 return VNET_BUFFER_F_IS_IP6;
91 }
92 else
93 {
94 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
95 return VNET_BUFFER_F_IS_IP4;
96 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
97 return VNET_BUFFER_F_IS_IP6;
98 }
99
100 return 0;
101}
102
/* Verdict for an incoming packet relative to the packet already held
 * on its GRO flow. */
typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,	  /* no decision */
  GRO_PACKET_ACTION_ENQUEUE = 1,  /* in-order continuation: append to flow */
  GRO_PACKET_ACTION_FLUSH = 2,	  /* out-of-order: flush held packets */
} gro_packet_action_t;
109
110static_always_inline gro_packet_action_t
111gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
112 u32 payload_len0)
113{
114 u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
115 u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);
116
117 /* next packet, enqueue */
118 if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
119 return GRO_PACKET_ACTION_ENQUEUE;
120 /* flush all packets */
121 else
122 return GRO_PACKET_ACTION_FLUSH;
123}
124
/* Append buffer b1 (index bi1, carrying payload_len1 bytes of TCP
 * payload after l234_sz1 bytes of l2/l3/l4 headers) to the tail of the
 * buffer chain headed by b0.  b1's headers are stripped by advancing
 * its current_data; b0's chained-length bookkeeping is updated. */
static_always_inline void
gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		   vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
		   u16 l234_sz1)
{
  vlib_buffer_t *pb = b0;

  /* first merge into b0: start the chained-length accounting from zero */
  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    b0->total_length_not_including_first_buffer = 0;

  /* walk to the last buffer in b0's chain */
  while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
    pb = vlib_get_buffer (vm, pb->next_buffer);

  /* drop b1's headers so only its payload is linked in */
  vlib_buffer_advance (b1, l234_sz1);
  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
  pb->next_buffer = bi1;
  b0->total_length_not_including_first_buffer += payload_len1;
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}
144
/* Validate b0's L4 checksum in software and return the resulting
 * checksum flags (caller tests VNET_BUFFER_F_L4_CHECKSUM_CORRECT).
 * Buffers with the offload flag set are trusted as-is: their checksum
 * will be computed later on tx, so validation is skipped. */
static_always_inline u32
gro_validate_checksum (vlib_main_t * vm, vlib_buffer_t * b0,
		       generic_header_offset_t * gho0, int is_ip4)
{
  u32 flags = 0;

  if (b0->flags & VNET_BUFFER_F_OFFLOAD)
    return VNET_BUFFER_F_L4_CHECKSUM_CORRECT;
  /* the validators expect current_data to point at the l3 header */
  vlib_buffer_advance (b0, gho0->l3_hdr_offset);
  if (is_ip4)
    flags = ip4_tcp_udp_validate_checksum (vm, b0);
  else
    flags = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
  /* restore the buffer to its original position */
  vlib_buffer_advance (b0, -gho0->l3_hdr_offset);
  return flags;
}
161
/* Parse and validate a single buffer as a GRO candidate.
 *
 * On success fills gho0 with the parsed header offsets and flow_key0
 * with the flow identity, and returns the packet's length-in-chain.
 * Returns 0 when the packet must not be coalesced: not IP, not TCP,
 * TCP flags other than ACK/PSH, failed checksum validation, or
 * already at/over TCP_MAX_GSO_SZ. */
static_always_inline u32
gro_get_packet_data (vlib_main_t *vm, vlib_buffer_t *b0,
		     generic_header_offset_t *gho0, gro_flow_key_t *flow_key0,
		     u8 is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 flags = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  /* only TCP is coalesced */
  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  /* both views share the l3 offset; only the matching one is used below */
  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 1);
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
				    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 0);
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
				    is_l2);
    }
  else
    return 0;

  /* never coalesce a packet with a bad (or unverifiable) checksum */
  if (PREDICT_FALSE ((flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) == 0))
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  /* already too big to grow any further */
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}
227
/* Try to coalesce b1 into b0 (flow-table-less path).  Both buffers must
 * be TCP, belong to the same flow, pass the bad-packet checks, stay
 * under TCP_MAX_GSO_SZ when combined, and b1 must be the in-order
 * continuation of b0.  On success b1's payload is chained onto b0,
 * b1's TCP flags are folded into b0's, and b1's ack number (network
 * byte order) is returned; returns 0 when nothing was merged. */
static_always_inline u32
gro_coalesce_buffers (vlib_main_t *vm, vlib_buffer_t *b0, vlib_buffer_t *b1,
		      u32 bi1, u8 is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  /* both must be TCP */
  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  /* both ip4 and ip6 views share the l3 offset; only the matching one
   * is consumed by the flow-key builders below */
  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  /* both packets must have the same IP version */
  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
				    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
				    is_l2);
    }
  else
    return 0;

  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  /* the merged packet must stay under the GSO size limit */
  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      /* preserve PSH etc. from the merged segment */
      tcp0->flags |= tcp1->flags;
      return tcp1->ack_number;
    }

  return 0;
}
333
/* Rewrite the headers of a coalesced buffer chain so it is a valid GSO
 * packet: derive gso_size from the first buffer's payload, fix the IP
 * total/payload length, set GSO + checksum-offload flags and patch in
 * the most recent ack number seen on the flow. */
static_always_inline void
gro_fixup_header (vlib_main_t *vm, vlib_buffer_t *b0, u32 ack_number, u8 is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );

  /* segment size = payload bytes in the first buffer of the chain */
  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      /* ip4 total length includes the ip header itself */
      ip4->length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l3_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
      vnet_buffer_offload_flags_set (b0, (VNET_BUFFER_OFFLOAD_F_TCP_CKSUM |
					  VNET_BUFFER_OFFLOAD_F_IP_CKSUM));
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      /* ip6 payload length excludes the fixed ip6 header, hence the
       * l4_hdr_offset (= l3_hdr_offset + ip6 header size) */
      ip6->payload_length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l4_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
      vnet_buffer_offload_flags_set (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
    }

  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  /* advertise the latest cumulative ack */
  tcp0->ack_number = ack_number;
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}
377
378static_always_inline u32
379vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
380 u32 * to)
381{
382 if (flow_table->flow_table_size > 0)
383 {
384 gro_flow_t *gro_flow;
385 u32 i = 0, j = 0;
386 while (i < GRO_FLOW_TABLE_MAX_SIZE)
387 {
388 gro_flow = &flow_table->gro_flow[i];
389 if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
390 {
391 // flush the packet
392 vlib_buffer_t *b0 =
393 vlib_get_buffer (vm, gro_flow->buffer_index);
394 gro_fixup_header (vm, b0, gro_flow->last_ack_number,
395 flow_table->is_l2);
396 to[j] = gro_flow->buffer_index;
397 gro_flow_table_reset_flow (flow_table, gro_flow);
398 flow_table->n_vectors++;
399 j++;
400 }
401 i++;
402 }
403
404 return j;
405 }
406 return 0;
407}
408
409static_always_inline void
410vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
411 gro_flow_table_t *
412 flow_table)
413{
414 if (gro_flow_table_is_timeout (vm, flow_table))
415 {
416 u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
417 u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);
418
419 if (n_to > 0)
420 {
421 u32 node_index = flow_table->node_index;
422 vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
423 u32 *f_to = vlib_frame_vector_args (f);
424 u32 i = 0;
425
426 while (i < n_to)
427 {
428 f_to[f->n_vectors] = to[i];
429 i++;
430 f->n_vectors++;
431 }
432 vlib_put_frame_to_node (vm, node_index, f);
433 }
434 gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
435 }
436}
437
438static_always_inline u32
Mohsin Kazmi8758a942021-05-28 17:11:23 +0200439vnet_gro_flush_all_packets (vlib_main_t *vm, gro_flow_table_t *flow_table,
440 gro_flow_t *gro_flow, vlib_buffer_t *b_s, u32 *to,
441 u32 bi_s, u32 bi0, u8 is_l2)
442{
443 flow_table->n_vectors++;
444 flow_table->total_vectors++;
445 gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
446 gro_flow->n_buffers = 0;
447 gro_flow_table_reset_flow (flow_table, gro_flow);
448 to[0] = bi_s;
449 to[1] = bi0;
450 return 2;
451}
452
/* Per-packet GRO entry point with flow-table state.
 *
 * Attempts to append buffer bi0 to the flow it belongs to.  Packets
 * that cannot take part in GRO (table disabled, already GSO, failed
 * parse/validation, table full) are passed through untouched.  Returns
 * the number of buffer indices written to 'to': 0 when bi0 was stored
 * on the flow, 1 when a single packet is emitted, 2 when both the
 * stored packet and bi0 are flushed. */
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
			    u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  u32 is_flush = 0;
  u8 is_l2 = flow_table->is_l2;

  if (!gro_flow_table_is_enable (flow_table))
    {
      /* GRO disabled: pass through */
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      /* already a GSO packet: pass through */
      to[0] = bi0;
      return 1;
    }

  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      /* not a GRO candidate: pass through */
      to[0] = bi0;
      return 1;
    }

  /* PSH asks for immediate delivery: still merge if possible, but
   * flush the flow right away instead of holding the packet */
  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  if (PREDICT_TRUE ((tcp0->flags & TCP_FLAG_PSH) == 0))
    gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  else
    {
      is_flush = 1;
      gro_flow = gro_flow_table_get_flow (flow_table, &flow_key0);
    }

  if (!gro_flow)
    {
      /* table full, or PSH packet with no flow to flush: pass through */
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      /* first packet of the flow: hold it and arm the flow timeout */
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      /* the flow already holds a stored packet: parse it and compare */
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
	(tcp_header_t *) (vlib_buffer_get_current (b_s) +
			  gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
	gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
	{
	  /* merge only while staying under the GSO size and the
	   * per-flow buffer-count limits */
	  if (PREDICT_TRUE (((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ) &&
			    (gro_flow->n_buffers < GRO_FLOW_N_BUFFERS)))
	    {
	      flow_table->total_vectors++;
	      gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      if (PREDICT_FALSE (is_flush))
		{
		  /* PSH seen: emit the merged packet immediately */
		  flow_table->n_vectors++;
		  tcp_s->flags |= tcp0->flags;
		  gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
		  gro_flow->n_buffers = 0;
		  gro_flow_table_reset_flow (flow_table, gro_flow);
		  to[0] = bi_s;
		  return 1;
		}
	      return 0;
	    }
	  else if (PREDICT_FALSE (is_flush))
	    // flush the all (current and stored) packets
	    return vnet_gro_flush_all_packets (vm, flow_table, gro_flow, b_s,
					       to, bi_s, bi0, is_l2);
	  else
	    {
	      // flush the stored GSO size packet and buffer the current packet
	      flow_table->n_vectors++;
	      flow_table->total_vectors++;
	      gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	      gro_flow->n_buffers = 0;
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
	      to[0] = bi_s;
	      return 1;
	    }
	}
      else
	{
	  // flush the all (current and stored) packets
	  return vnet_gro_flush_all_packets (vm, flow_table, gro_flow, b_s, to,
					     bi_s, bi0, is_l2);
	}
    }
}
583
584/**
585 * coalesce buffers with flow tables
586 */
587static_always_inline u32
588vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
589 u16 n_left_from, u32 * to)
590{
591 u16 count = 0, i = 0;
592
593 for (i = 0; i < n_left_from; i++)
594 count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);
595
596 return count;
597}
598
599/**
600 * coalesce buffers in opportunistic way without flow tables
601 */
/* Opportunistically coalesce a run of consecutive same-flow buffers
 * into the first buffer, without any flow-table state.  Stops at the
 * first buffer that cannot be merged (GSO-flagged or different flow /
 * out of order).  Returns the number of buffers consumed from 'from'
 * (always >= 1); when >= 2, from[0]'s headers have been fixed up as a
 * GSO packet covering the merged payload. */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
			int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  /* bi is the index of the next merge candidate and, at exit, the
   * count of consumed buffers; ack_number tracks the newest ack merged */
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      while (n_left_from > 1)
	{
	  if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
	    {
	      u32 ret;
	      /* non-zero return = b[bi] merged; it is the ack number */
	      if ((ret =
		   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
					 is_l2)) != 0)
		{
		  n_left_from -= 1;
		  bi += 1;
		  ack_number = ret;
		  continue;
		}
	      else
		break;
	    }
	  else
	    break;
	}

      /* at least one buffer was merged into b[0]: finalize its headers */
      if (bi >= 2)
	{
	  gro_fixup_header (vm, b[0], ack_number, is_l2);
	}
    }
  return bi;
}
639#endif /* included_gro_func_h */
640
641/*
642 * fd.io coding-style-patch-verification: ON
643 *
644 * Local Variables:
645 * eval: (c-set-style "gnu")
646 * End:
647 */