/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_gro_func_h
#define included_gro_func_h

#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/vnet.h>

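/* Reject GRO candidates whose first buffer carries no payload beyond the
 * L2/L3/L4 headers, or which have any TCP flag other than ACK set. */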
static_always_inline u8
gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
{
  if (((b->current_length - l234_sz) <= 0) || ((flags &= ~TCP_FLAG_ACK) != 0))
    return 1;
  return 0;
}

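/* Build the IPv4 flow key (RX/TX interfaces, addresses, ports) used to
 * match packets belonging to the same TCP flow. */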
static_always_inline void
gro_get_ip4_flow_from_packet (u32 * sw_if_index,
                              ip4_header_t * ip4, tcp_header_t * tcp,
                              gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
  ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

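/* Same as above, but for IPv6 packets. */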
static_always_inline void
gro_get_ip6_flow_from_packet (u32 * sw_if_index,
                              ip6_header_t * ip6, tcp_header_t * tcp,
                              gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
  ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

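/* Classify the buffer as IPv4 or IPv6: use the buffer flags when already
 * set, otherwise the ethertype (looking past up to two VLAN tags) on L2
 * paths, or the IP version nibble on L3 paths. Returns 0 if neither. */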
static_always_inline u32
gro_is_ip4_or_ip6_packet (vlib_buffer_t * b0, int is_l2)
{
  if (b0->flags & VNET_BUFFER_F_IS_IP4)
    return VNET_BUFFER_F_IS_IP4;
  if (b0->flags & VNET_BUFFER_F_IS_IP6)
    return VNET_BUFFER_F_IS_IP6;
  if (is_l2)
    {
      ethernet_header_t *eh =
        (ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);

      if (ethernet_frame_is_tagged (ethertype))
        {
          ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

          ethertype = clib_net_to_host_u16 (vlan->type);
          if (ethertype == ETHERNET_TYPE_VLAN)
            {
              vlan++;
              ethertype = clib_net_to_host_u16 (vlan->type);
            }
        }
      if (ethertype == ETHERNET_TYPE_IP4)
        return VNET_BUFFER_F_IS_IP4;
      if (ethertype == ETHERNET_TYPE_IP6)
        return VNET_BUFFER_F_IS_IP6;
    }
  else
    {
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
        return VNET_BUFFER_F_IS_IP4;
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
        return VNET_BUFFER_F_IS_IP6;
    }

  return 0;
}

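/* Action to take for a packet relative to the flow's stored packet. */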
typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,
  GRO_PACKET_ACTION_ENQUEUE = 1,
  GRO_PACKET_ACTION_FLUSH = 2,
} gro_packet_action_t;

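/* Enqueue only if the second segment is the direct in-sequence successor
 * of the first (seq1 == seq0 + payload_len0); anything else flushes. */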
static_always_inline gro_packet_action_t
gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
                        u32 payload_len0)
{
  u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
  u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);

  /* next packet, enqueue */
  if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
    return GRO_PACKET_ACTION_ENQUEUE;
  /* flush all packets */
  else
    return GRO_PACKET_ACTION_FLUSH;
}

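/* Chain b1 (with its headers stripped) onto the tail of b0's buffer chain
 * and grow b0's chained length by the added payload. */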
static_always_inline void
gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
                   vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
                   u16 l234_sz1)
{
  vlib_buffer_t *pb = b0;

  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    b0->total_length_not_including_first_buffer = 0;

  while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
    pb = vlib_get_buffer (vm, pb->next_buffer);

  vlib_buffer_advance (b1, l234_sz1);
  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
  pb->next_buffer = bi1;
  b0->total_length_not_including_first_buffer += payload_len1;
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

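/* Treat the L4 checksum as correct when the buffer requests checksum
 * offload; otherwise validate the IPv4/IPv6 TCP checksum in software. */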
static_always_inline u32
gro_validate_checksum (vlib_main_t * vm, vlib_buffer_t * b0,
                       generic_header_offset_t * gho0, int is_ip4)
{
  u32 flags = 0;

  if (b0->flags & VNET_BUFFER_F_OFFLOAD)
    return VNET_BUFFER_F_L4_CHECKSUM_CORRECT;
  vlib_buffer_advance (b0, gho0->l3_hdr_offset);
  if (is_ip4)
    flags = ip4_tcp_udp_validate_checksum (vm, b0);
  else
    flags = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
  vlib_buffer_advance (b0, -gho0->l3_hdr_offset);
  return flags;
}

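/* Parse headers, validate the packet for GRO and fill in the flow key.
 * Returns the packet length on success, 0 if the packet is not a
 * candidate for coalescing. */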
static_always_inline u32
gro_get_packet_data (vlib_main_t * vm, vlib_buffer_t * b0,
                     generic_header_offset_t * gho0,
                     gro_flow_key_t * flow_key0, int is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 flags = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 1);
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
                                    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 0);
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
                                    is_l2);
    }
  else
    return 0;

  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) == 0)
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}

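/* Opportunistically coalesce b1 into b0 when both are valid, in-order TCP
 * segments of the same flow and the merged size stays below
 * TCP_MAX_GSO_SZ. Returns b1's ACK number (network byte order) on
 * success, 0 otherwise. */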
static_always_inline u32
gro_coalesce_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
                      vlib_buffer_t * b1, u32 bi1, int is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
                                    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
                                    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
                                    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
                                    is_l2);
    }
  else
    return 0;

  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      return tcp1->ack_number;
    }

  return 0;
}

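/* Turn the coalesced chain into a GSO packet: set gso_size to the first
 * segment's payload size, rewrite the IPv4 total length or IPv6 payload
 * length, request GSO and checksum offload, and store the latest ACK. */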
static_always_inline void
gro_fixup_header (vlib_main_t * vm, vlib_buffer_t * b0, u32 ack_number,
                  int is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
                                       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
                                       1 /* is_ip6 */ );

  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip4->length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                              gho0.l3_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
      vnet_buffer_offload_flags_set (b0, (VNET_BUFFER_OFFLOAD_F_TCP_CKSUM |
                                          VNET_BUFFER_OFFLOAD_F_IP_CKSUM));
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip6->payload_length =
        clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                              gho0.l4_hdr_offset);
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
      vnet_buffer_offload_flags_set (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
    }

  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp0->ack_number = ack_number;
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}

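/* Flush every flow that has buffered packets and has timed out: fix up
 * its headers, emit its buffer index into 'to' and reset the flow.
 * Returns the number of buffers flushed. */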
static_always_inline u32
vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
                           u32 * to)
{
  if (flow_table->flow_table_size > 0)
    {
      gro_flow_t *gro_flow;
      u32 i = 0, j = 0;
      while (i < GRO_FLOW_TABLE_MAX_SIZE)
        {
          gro_flow = &flow_table->gro_flow[i];
          if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
            {
              // flush the packet
              vlib_buffer_t *b0 =
                vlib_get_buffer (vm, gro_flow->buffer_index);
              gro_fixup_header (vm, b0, gro_flow->last_ack_number,
                                flow_table->is_l2);
              to[j] = gro_flow->buffer_index;
              gro_flow_table_reset_flow (flow_table, gro_flow);
              flow_table->n_vectors++;
              j++;
            }
          i++;
        }

      return j;
    }
  return 0;
}

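/* Periodic flush: when the flow table's own timer expires, flush the
 * timed-out flows, hand the resulting buffers to the table's registered
 * node in a new frame, and re-arm the table timer. */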
static_always_inline void
vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
                                                 gro_flow_table_t *
                                                 flow_table)
{
  if (gro_flow_table_is_timeout (vm, flow_table))
    {
      u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
      u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);

      if (n_to > 0)
        {
          u32 node_index = flow_table->node_index;
          vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
          u32 *f_to = vlib_frame_vector_args (f);
          u32 i = 0;

          while (i < n_to)
            {
              f_to[f->n_vectors] = to[i];
              i++;
              f->n_vectors++;
            }
          vlib_put_frame_to_node (vm, node_index, f);
        }
      gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
    }
}

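/* Run GRO on a single buffer. Buffers that cannot be coalesced (table
 * disabled, already GSO, not a GRO candidate, no free flow) are passed
 * through. Otherwise the buffer either starts a new flow, is merged into
 * the flow's stored packet, or forces a flush. Returns the number of
 * buffer indices written to 'to' (0, 1 or 2). */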
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
                            u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  int is_l2 = flow_table->is_l2;

  if (!gro_flow_table_is_enable (flow_table))
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      to[0] = bi0;
      return 1;
    }

  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      to[0] = bi0;
      return 1;
    }

  gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  if (!gro_flow)
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      tcp0 =
        (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      tcp0 =
        (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
        vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
                                           1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
        vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
                                           0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
        (tcp_header_t *) (vlib_buffer_get_current (b_s) +
                          gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
        gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
        {
          if (PREDICT_TRUE (((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ) &&
                            gro_flow->n_buffers < GRO_FLOW_N_BUFFERS))
            {
              flow_table->total_vectors++;
              gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
              gro_flow_store_packet (gro_flow, bi0);
              gro_flow->last_ack_number = tcp0->ack_number;
              return 0;
            }
          else
            {
              // flush the stored GSO size packet and buffer the current packet
              flow_table->n_vectors++;
              flow_table->total_vectors++;
              gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
              gro_flow->n_buffers = 0;
              gro_flow_store_packet (gro_flow, bi0);
              gro_flow->last_ack_number = tcp0->ack_number;
              gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
              to[0] = bi_s;
              return 1;
            }
        }
      else
        {
          // flush all (current and stored) packets
          flow_table->n_vectors++;
          flow_table->total_vectors++;
          gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
          gro_flow->n_buffers = 0;
          gro_flow_table_reset_flow (flow_table, gro_flow);
          to[0] = bi_s;
          to[1] = bi0;
          return 2;
        }
    }
}

/**
 * coalesce buffers with flow tables
 */
static_always_inline u32
vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
                 u16 n_left_from, u32 * to)
{
  u16 count = 0, i = 0;

  for (i = 0; i < n_left_from; i++)
    count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);

  return count;
}

/**
 * coalesce buffers opportunistically, without flow tables
 */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
                        int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      while (n_left_from > 1)
        {
          if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
            {
              u32 ret;
              if ((ret =
                   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
                                         is_l2)) != 0)
                {
                  n_left_from -= 1;
                  bi += 1;
                  ack_number = ret;
                  continue;
                }
              else
                break;
            }
          else
            break;
        }

      if (bi >= 2)
        {
          gro_fixup_header (vm, b[0], ack_number, is_l2);
        }
    }
  return bi;
}
#endif /* included_gro_func_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */