/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_gso_h
#define included_gso_h

#include <vnet/vnet.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/ip_psh_cksum.h>

typedef struct
{
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  u16 msg_id_base;
} gso_main_t;

extern gso_main_t gso_main;

int vnet_sw_interface_gso_enable_disable (u32 sw_if_index, u8 enable);
u32 gso_segment_buffer (vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd,
			u32 bi, vlib_buffer_t *b, generic_header_offset_t *gho,
			u32 n_bytes_b, u8 is_l2, u8 is_ip6);

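/*
 * Usage sketch (illustrative only, not part of the original header): a
 * device output node that sees VNET_BUFFER_F_GSO set would typically
 * parse the header offsets first and then segment the buffer. The names
 * `ptd`, `b0`, `bi0`, `is_l2`, `is_ip4` and `is_ip6` below are assumed
 * to come from the calling node:
 *
 *   generic_header_offset_t gho = { 0 };
 *   vnet_generic_header_offset_parser (b0, &gho, is_l2, is_ip4, is_ip6);
 *   u32 n_tx_bytes = gso_segment_buffer (vm, ptd, bi0, b0, &gho,
 *                                        vlib_buffer_length_in_chain (vm, b0),
 *                                        is_l2, is_ip6);
 *   // a zero return means buffer allocation failed; otherwise the
 *   // resulting segments are left in ptd->split_buffers
 */
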
/* Initialize the freshly allocated segment buffers from the template
 * buffer b0: reset current_data/current_length, copy flags, flow id,
 * error, config index, opaque metadata and the (outer + inner) header
 * bytes. The main loop handles buffers two at a time and prefetches the
 * next pair; the tail loop finishes any remainder. */
static_always_inline void
gso_init_bufs_from_template_base (vlib_buffer_t **bufs, vlib_buffer_t *b0,
				  u32 flags, u16 n_bufs, u16 hdr_sz)
{
  u32 i = n_bufs;
  while (i >= 4)
    {
      /* prefetches */
      CLIB_PREFETCH (bufs[2], 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (bufs[3], 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      vlib_prefetch_buffer_data (bufs[2], LOAD);
      vlib_prefetch_buffer_data (bufs[3], LOAD);

      /* copying objects from cacheline 0 */
      bufs[0]->current_data = 0;
      bufs[1]->current_data = 0;

      bufs[0]->current_length = hdr_sz;
      bufs[1]->current_length = hdr_sz;

      bufs[0]->flags = bufs[1]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
      bufs[0]->flow_id = bufs[1]->flow_id = b0->flow_id;
      bufs[0]->error = bufs[1]->error = b0->error;
      bufs[0]->current_config_index = bufs[1]->current_config_index =
	b0->current_config_index;

      clib_memcpy_fast (&bufs[0]->opaque, &b0->opaque, sizeof (b0->opaque));
      clib_memcpy_fast (&bufs[1]->opaque, &b0->opaque, sizeof (b0->opaque));

      /* copying objects from cacheline 1 */
      bufs[0]->trace_handle = b0->trace_handle;
      bufs[1]->trace_handle = b0->trace_handle;

      bufs[0]->total_length_not_including_first_buffer = 0;
      bufs[1]->total_length_not_including_first_buffer = 0;

      /* copying data */
      clib_memcpy_fast (bufs[0]->data, vlib_buffer_get_current (b0), hdr_sz);
      clib_memcpy_fast (bufs[1]->data, vlib_buffer_get_current (b0), hdr_sz);

      bufs += 2;
      i -= 2;
    }

  while (i > 0)
    {
      /* copying objects from cacheline 0 */
      bufs[0]->current_data = 0;
      bufs[0]->current_length = hdr_sz;
      bufs[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
      bufs[0]->flow_id = b0->flow_id;
      bufs[0]->error = b0->error;
      bufs[0]->current_config_index = b0->current_config_index;
      clib_memcpy_fast (&bufs[0]->opaque, &b0->opaque, sizeof (b0->opaque));

      /* copying objects from cacheline 1 */
      bufs[0]->trace_handle = b0->trace_handle;
      bufs[0]->total_length_not_including_first_buffer = 0;

      /* copying data */
      clib_memcpy_fast (bufs[0]->data, vlib_buffer_get_current (b0), hdr_sz);

      bufs++;
      i--;
    }
}

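/*
 * Finalize one segment: rewrite the TCP sequence number and flags, patch
 * the IP length field, and fold the running checksum `c` (which already
 * covers the copied payload) together with the L3 pseudo-header and the
 * TCP header into tcp->checksum. For reference, ip6_psh_t is 40 bytes
 * (16 B src + 16 B dst + 4 B l4len + 4 B proto), i.e. exactly the ten
 * u32 words summed below; the IPv4 pseudo-header contribution is the
 * src/dst addresses plus the (TCP length, protocol) word built in the
 * else branch. For routed (non-L2, non-tunnel) packets the midchain
 * adjacency fixup, if any, is applied last.
 */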
static_always_inline void
gso_fixup_segmented_buf (vlib_main_t *vm, vlib_buffer_t *b0, u32 next_tcp_seq,
			 int is_l2, int is_ip6, generic_header_offset_t *gho,
			 clib_ip_csum_t *c, u8 tcp_flags)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset +
		      gho->outer_hdr_sz);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset +
		      gho->outer_hdr_sz);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset +
		      gho->outer_hdr_sz);

  tcp->flags = tcp_flags;
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);

  if (is_ip6)
    {
      ip6->payload_length = clib_host_to_net_u16 (
	b0->current_length - gho->l4_hdr_offset - gho->outer_hdr_sz);
      vnet_buffer_offload_flags_clear (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
      ip6_psh_t psh = { 0 };
      u32 *p = (u32 *) &psh;
      psh.src = ip6->src_address;
      psh.dst = ip6->dst_address;
      psh.l4len = ip6->payload_length;
      psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
      for (int i = 0; i < 10; i++)
	c->sum += p[i];
    }
  else
    {
      ip4->length = clib_host_to_net_u16 (
	b0->current_length - gho->l3_hdr_offset - gho->outer_hdr_sz);
      if (gho->gho_flags & GHO_F_IP4)
	ip4->checksum = ip4_header_checksum (ip4);
      vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
					    VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
      c->sum += clib_mem_unaligned (&ip4->src_address, u32);
      c->sum += clib_mem_unaligned (&ip4->dst_address, u32);
      c->sum += clib_host_to_net_u32 (
	(clib_net_to_host_u16 (ip4->length) - ip4_header_bytes (ip4)) +
	(ip4->protocol << 16));
    }
  clib_ip_csum_chunk (c, (u8 *) tcp, gho->l4_hdr_sz);
  tcp->checksum = clib_ip_csum_fold (c);

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
	  adj0->sub_type.midchain.fixup_func)
	/* calls e.g. ipip44_fixup */
	adj0->sub_type.midchain.fixup_func (
	  vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

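/*
 * Segment one GSO buffer (chain) into MSS-sized TCP segments, copying the
 * template headers into each segment and computing the TCP checksum on
 * the fly. A worked example of the sizing math, assuming illustrative
 * values: with gso_size = 1448, hdr_sz = 54 (14 B ethernet + 20 B IPv4 +
 * 20 B TCP) and a 2048-byte default buffer data size, `size` stays 1448
 * and a 2896-byte payload yields n_bufs = (2896 + 1448 - 1) / 1448 = 2
 * segments.
 */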
static_always_inline u32
gso_segment_buffer_inline (vlib_main_t *vm,
			   vnet_interface_per_thread_data_t *ptd,
			   vlib_buffer_t *b, generic_header_offset_t *gho,
			   int is_l2, int is_ip6)
{
  vlib_buffer_t **bufs = 0;
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (b)->gso_size;
  u8 tcp_flags = 0, tcp_flags_no_fin_psh = 0;
  u32 default_bflags =
    b->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 hdr_sz = gho->hdr_sz + gho->outer_hdr_sz;
  u32 next_tcp_seq = 0, tcp_seq = 0;
  u32 data_size = vlib_buffer_length_in_chain (vm, b) - hdr_sz;
  u16 size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - hdr_sz);
  u16 n_alloc = 0, n_bufs = ((data_size + size - 1) / size);
  clib_ip_csum_t c = { .sum = 0, .odd = 0 };
  u8 *src_ptr, *dst_ptr;
  u16 src_left, dst_left, bytes_to_copy;
  u32 i = 0;

  vec_validate (ptd->split_buffers, n_bufs - 1);
  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }

  vec_validate (bufs, n_bufs - 1);
  vlib_get_buffers (vm, ptd->split_buffers, bufs, n_bufs);

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b) + gho->l4_hdr_offset +
		      gho->outer_hdr_sz);
  tcp_seq = next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  gso_init_bufs_from_template_base (bufs, b, default_bflags, n_bufs, hdr_sz);

  src_ptr = vlib_buffer_get_current (b) + hdr_sz;
  src_left = b->current_length - hdr_sz;
  dst_ptr = vlib_buffer_get_current (bufs[i]) + hdr_sz;
  dst_left = size;

  while (data_size)
    {
      bytes_to_copy = clib_min (src_left, dst_left);
      clib_ip_csum_and_copy_chunk (&c, src_ptr, dst_ptr, bytes_to_copy);

      src_left -= bytes_to_copy;
      src_ptr += bytes_to_copy;
      data_size -= bytes_to_copy;
      dst_left -= bytes_to_copy;
      dst_ptr += bytes_to_copy;
      next_tcp_seq += bytes_to_copy;
      bufs[i]->current_length += bytes_to_copy;

      if (0 == src_left)
	{
	  /* init src to the next buffer in chain */
	  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      b = vlib_get_buffer (vm, b->next_buffer);
	      src_left = b->current_length;
	      src_ptr = vlib_buffer_get_current (b);
	    }
	  else
	    {
	      ASSERT (data_size == 0);
	      break;
	    }
	}
      if (0 == dst_left && data_size)
	{
	  vlib_prefetch_buffer_header (bufs[i + 1], LOAD);
	  vlib_prefetch_buffer_data (bufs[i + 1], LOAD);

	  n_tx_bytes += bufs[i]->current_length;
	  gso_fixup_segmented_buf (vm, bufs[i], tcp_seq, is_l2, is_ip6, gho,
				   &c, tcp_flags_no_fin_psh);
	  i++;
	  dst_left = size;
	  dst_ptr = vlib_buffer_get_current (bufs[i]) + hdr_sz;
	  tcp_seq = next_tcp_seq;
	  // reset clib_ip_csum_t
	  c.odd = 0;
	  c.sum = 0;
	}
    }

  ASSERT ((i + 1) == n_alloc);
  n_tx_bytes += bufs[i]->current_length;
  gso_fixup_segmented_buf (vm, bufs[i], tcp_seq, is_l2, is_ip6, gho, &c,
			   tcp_flags);

  vec_free (bufs);
  return n_tx_bytes;
}

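/*
 * Caller-side sketch (illustrative only; the enqueue step and the exact
 * cleanup are assumptions about the calling node, not something this
 * header mandates): on success the new segments live in
 * ptd->split_buffers and the original, un-segmented buffer is no longer
 * needed:
 *
 *   u32 n_tx_bytes = gso_segment_buffer_inline (vm, ptd, b0, &gho,
 *                                               is_l2, is_ip6);
 *   if (n_tx_bytes)
 *     {
 *       // enqueue vec_len (ptd->split_buffers) segment buffers to the
 *       // next node, free the original buffer index bi0 and reset the
 *       // scratch vector before handling the next GSO packet
 *       vlib_buffer_free_one (vm, bi0);
 *       vec_reset_length (ptd->split_buffers);
 *     }
 */
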
#endif /* included_gso_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */