/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
#include <vlib/buffer.h>
#include <vlib/physmem_funcs.h>
#include <vlib/main.h>
#include <vlib/node.h>

/** \file
    vlib buffer access methods.
*/

always_inline void
vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  /* reference count in allocated buffer always must be 1 or higher */
  ASSERT (b->ref_count > 0);

  /* verify that buffer pool index is valid */
  bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
  ASSERT (pointer_to_uword (b) >= bp->start);
  ASSERT (pointer_to_uword (b) < bp->start + bp->size -
	  (bp->data_size + sizeof (vlib_buffer_t)));
}

always_inline void *
vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
			    uword offset)
{
  offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
}

/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b;

  b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
  vlib_buffer_validate (vm, b);
  return b;
}
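
/* Usage sketch (illustrative, not part of the API): given a buffer index
   taken from a node frame, translate it to a pointer and look at the
   payload.  Assumes `vm` is the calling thread's vlib_main_t and `bi` is a
   valid, allocated buffer index.

     u32 bi = from[0];                            // index from the frame vector
     vlib_buffer_t *b = vlib_get_buffer (vm, bi);
     u8 *payload = vlib_buffer_get_current (b);   // data at current_data offset
     u16 seg_len = b->current_length;             // bytes valid in this segment only
*/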

static_always_inline u32
vlib_buffer_get_default_data_size (vlib_main_t * vm)
{
  return vm->buffer_main->default_data_size;
}

static_always_inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
#if defined(CLIB_HAVE_VEC512)
  while (n_indices >= 16)
    {
      u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
      dst += 16;
      src += 16;
      n_indices -= 16;
    }
#endif

#if defined(CLIB_HAVE_VEC256)
  while (n_indices >= 8)
    {
      u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
      dst += 8;
      src += 8;
      n_indices -= 8;
    }
#endif

#if defined(CLIB_HAVE_VEC128)
  while (n_indices >= 4)
    {
      u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
      dst += 4;
      src += 4;
      n_indices -= 4;
    }
#endif

  while (n_indices)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n_indices -= 1;
    }
}

always_inline void
vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
				    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (dst, ring + start, n);
      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
    }
}

always_inline void
vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
				  u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (ring + start, src, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (ring + start, src, n);
      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
    }
}
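
/* Usage sketch: draining n slots from a hypothetical rx descriptor ring into
   a linear array, letting the helper handle the wrap-around split.  `rxq`
   and its fields are illustrative, not a real VPP structure.

     u32 bufs[VLIB_FRAME_SIZE];
     u32 slot = rxq->next;                        // first slot to drain
     vlib_buffer_copy_indices_from_ring (bufs, rxq->buffer_indices, slot,
                                         rxq->size, n);
     rxq->next = (slot + n) & (rxq->size - 1);    // ring size is a power of two
*/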

STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
{
#if defined CLIB_HAVE_VEC512
  b->as_u8x64[0] = bt->as_u8x64[0];
#elif defined (CLIB_HAVE_VEC256)
  b->as_u8x32[0] = bt->as_u8x32[0];
  b->as_u8x32[1] = bt->as_u8x32[1];
#elif defined (CLIB_HAVE_VEC128)
  b->as_u8x16[0] = bt->as_u8x16[0];
  b->as_u8x16[1] = bt->as_u8x16[1];
  b->as_u8x16[2] = bt->as_u8x16[2];
  b->as_u8x16[3] = bt->as_u8x16[3];
#else
  clib_memcpy_fast (b, bt, 64);
#endif
}

always_inline u8
vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
{
  ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
  return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
}

/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
#ifdef CLIB_HAVE_VEC256
  u64x4 off = u64x4_splat (buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
      b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
      b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  while (count)
    {
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b += 1;
      bi += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer indices into buffer pointers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}
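
/* Usage sketch: the common node pattern - translate the frame's buffer
   indices into pointers once, then iterate.  Assumes this runs inside a node
   function with `vm` and `frame` in scope.

     u32 *from = vlib_frame_vector_args (frame);
     u32 n_left = frame->n_vectors;
     vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

     vlib_get_buffers (vm, from, bufs, n_left);
     while (n_left > 0)
       {
         // process b[0] ...
         b += 1;
         n_left -= 1;
       }
*/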

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}

/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  u64x4 off4 = u64x4_splat (vm->buffer_main->buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer pointers into buffer indices

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
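
/* Usage sketch: linearizing a possibly chained packet into contiguous
   memory, sizing the destination with vlib_buffer_length_in_chain.  The
   local array and its size limit are illustrative; error handling is
   omitted.

     vlib_buffer_t *b = vlib_get_buffer (vm, bi);
     uword total = vlib_buffer_length_in_chain (vm, b);
     u8 flat[2048];                        // must be >= total
     if (total <= sizeof (flat))
       vlib_buffer_contents (vm, bi, flat);
*/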

always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}

always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

u8 *vlib_validate_buffers (vlib_main_t * vm,
			   u32 * buffers,
			   uword next_buffer_stride,
			   uword n_buffers,
			   vlib_buffer_known_state_t known_state,
			   uword follow_buffer_next);

static_always_inline vlib_buffer_pool_t *
vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  return vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
}

static_always_inline uword
vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
		      u32 n_buffers)
{
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  u32 len;

  ASSERT (bp->buffers);

  clib_spinlock_lock (&bp->lock);
  len = bp->n_avail;
  if (PREDICT_TRUE (n_buffers < len))
    {
      len -= n_buffers;
      vlib_buffer_copy_indices (buffers, bp->buffers + len, n_buffers);
      bp->n_avail = len;
      clib_spinlock_unlock (&bp->lock);
      return n_buffers;
    }
  else
    {
      vlib_buffer_copy_indices (buffers, bp->buffers, len);
      bp->n_avail = 0;
      clib_spinlock_unlock (&bp->lock);
      return len;
    }
}


/** \brief Allocate buffers from specific pool into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param buffer_pool_index - (u8) index of the buffer pool to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/

always_inline u32
vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			     u8 buffer_pool_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;
  vlib_buffer_pool_thread_t *bpt;
  u32 *src, *dst, len, n_left;

  /* If buffer allocation fault injection is configured */
  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
    {
      u32 vlib_buffer_alloc_may_fail (vlib_main_t *, u32);

      /* See how many buffers we're willing to allocate */
      n_buffers = vlib_buffer_alloc_may_fail (vm, n_buffers);
      if (n_buffers == 0)
	return (n_buffers);
    }

  bp = vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
  bpt = vec_elt_at_index (bp->threads, vm->thread_index);

  dst = buffers;
  n_left = n_buffers;
  len = bpt->n_cached;

  /* per-thread cache contains enough buffers */
  if (len >= n_buffers)
    {
      src = bpt->cached_buffers + len - n_buffers;
      vlib_buffer_copy_indices (dst, src, n_buffers);
      bpt->n_cached -= n_buffers;

      if (CLIB_DEBUG > 0)
	vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
					 VLIB_BUFFER_KNOWN_FREE);
      return n_buffers;
    }

  /* alloc bigger than cache - take buffers directly from main pool */
  if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
    {
      n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
					n_buffers);

      if (CLIB_DEBUG > 0)
	vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
					 VLIB_BUFFER_KNOWN_FREE);
      return n_buffers;
    }

  /* take everything available in the cache */
  if (len)
    {
      vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
      bpt->n_cached = 0;
      dst += len;
      n_left -= len;
    }

  len = round_pow2 (n_left, 32);
  len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
			      len);
  bpt->n_cached = len;

  if (len)
    {
      u32 n_copy = clib_min (len, n_left);
      src = bpt->cached_buffers + len - n_copy;
      vlib_buffer_copy_indices (dst, src, n_copy);
      bpt->n_cached -= n_copy;
      n_left -= n_copy;
    }

  n_buffers -= n_left;

  /* Verify that buffers are known free. */
  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				     VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}

/** \brief Allocate buffers from specific numa node into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param numa_node - (u32) numa node
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_on_numa (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			   u32 numa_node)
{
  u8 index = vlib_buffer_pool_get_default_for_numa (vm, numa_node);
  return vlib_buffer_alloc_from_pool (vm, buffers, n_buffers, index);
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/

always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_on_numa (vm, buffers, n_buffers, vm->numa_node);
}
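
/* Usage sketch: allocation may return fewer buffers than requested, so the
   return value must be checked and anything actually allocated freed on the
   error path.  `n_want` is illustrative.

     u32 bis[32];
     u32 n_alloc = vlib_buffer_alloc (vm, bis, n_want);
     if (PREDICT_FALSE (n_alloc < n_want))
       {
         vlib_buffer_free (vm, bis, n_alloc);   // give back the partial batch
         return 0;                              // or count a drop and bail out
       }
*/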

/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
			   u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}

/** \brief Allocate buffers into ring from specific buffer pool

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
				     u32 ring_size, u32 n_buffers,
				     u8 buffer_pool_index)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc_from_pool (vm, ring + start, n_buffers,
					buffer_pool_index);

  n_alloc = vlib_buffer_alloc_from_pool (vm, ring + start, ring_size - start,
					 buffer_pool_index);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc_from_pool (vm, ring, n_buffers - n_alloc,
					    buffer_pool_index);

  return n_alloc;
}

static_always_inline void
vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
		      u32 * buffers, u32 n_buffers)
{
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
						     vm->thread_index);
  u32 n_cached, n_empty;

  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				     VLIB_BUFFER_KNOWN_ALLOCATED);

  n_cached = bpt->n_cached;
  n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
  if (n_buffers <= n_empty)
    {
      vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
				buffers, n_buffers);
      bpt->n_cached = n_cached + n_buffers;
      return;
    }

  vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
			    buffers + n_buffers - n_empty, n_empty);
  bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;

  clib_spinlock_lock (&bp->lock);
  vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
			    n_buffers - n_empty);
  bp->n_avail += n_buffers - n_empty;
  clib_spinlock_unlock (&bp->lock);
}

static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			 int maybe_next)
{
  const int queue_size = 128;
  vlib_buffer_pool_t *bp = 0;
  u8 buffer_pool_index = ~0;
  u32 n_queue = 0, queue[queue_size + 4];
  vlib_buffer_t bt = { };
#if defined(CLIB_HAVE_VEC128)
  vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
  vlib_buffer_t bpi_vec = {.buffer_pool_index = ~0 };
  vlib_buffer_t flags_refs_mask = {
    .flags = VLIB_BUFFER_NEXT_PRESENT,
    .ref_count = ~1
  };
#endif

  while (n_buffers)
    {
      vlib_buffer_t *b[8];
      u32 bi, sum = 0, flags, next;

      if (n_buffers < 12)
	goto one_by_one;

      vlib_get_buffers (vm, buffers, b, 4);
      vlib_get_buffers (vm, buffers + 8, b + 4, 4);

      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

#if defined(CLIB_HAVE_VEC128)
      u8x16 p0, p1, p2, p3, r;
      p0 = u8x16_load_unaligned (b[0]);
      p1 = u8x16_load_unaligned (b[1]);
      p2 = u8x16_load_unaligned (b[2]);
      p3 = u8x16_load_unaligned (b[3]);

      r = p0 ^ bpi_vec.as_u8x16[0];
      r |= p1 ^ bpi_vec.as_u8x16[0];
      r |= p2 ^ bpi_vec.as_u8x16[0];
      r |= p3 ^ bpi_vec.as_u8x16[0];
      r &= bpi_mask.as_u8x16[0];
      r |= (p0 | p1 | p2 | p3) & flags_refs_mask.as_u8x16[0];

      sum = !u8x16_is_all_zero (r);
#else
      sum |= b[0]->flags;
      sum |= b[1]->flags;
      sum |= b[2]->flags;
      sum |= b[3]->flags;
      sum &= VLIB_BUFFER_NEXT_PRESENT;
      sum += b[0]->ref_count - 1;
      sum += b[1]->ref_count - 1;
      sum += b[2]->ref_count - 1;
      sum += b[3]->ref_count - 1;
      sum |= b[0]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[1]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[2]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[3]->buffer_pool_index ^ buffer_pool_index;
#endif

      if (sum)
	goto one_by_one;

      vlib_buffer_copy_indices (queue + n_queue, buffers, 4);
      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);
      n_queue += 4;

      vlib_buffer_validate (vm, b[0]);
      vlib_buffer_validate (vm, b[1]);
      vlib_buffer_validate (vm, b[2]);
      vlib_buffer_validate (vm, b[3]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (n_queue >= queue_size)
	{
	  vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
	  n_queue = 0;
	}
      buffers += 4;
      n_buffers -= 4;
      continue;

    one_by_one:
      bi = buffers[0];

    next_in_chain:
      b[0] = vlib_get_buffer (vm, bi);
      flags = b[0]->flags;
      next = b[0]->next_buffer;

      if (PREDICT_FALSE (buffer_pool_index != b[0]->buffer_pool_index))
	{

	  if (n_queue)
	    {
	      vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
	      n_queue = 0;
	    }

	  buffer_pool_index = b[0]->buffer_pool_index;
#if defined(CLIB_HAVE_VEC128)
	  bpi_vec.buffer_pool_index = buffer_pool_index;
#endif
	  bp = vlib_get_buffer_pool (vm, buffer_pool_index);
	  vlib_buffer_copy_template (&bt, &bp->buffer_template);
	}

      vlib_buffer_validate (vm, b[0]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
	{
	  vlib_buffer_copy_template (b[0], &bt);
	  queue[n_queue++] = bi;
	}

      if (n_queue == queue_size)
	{
	  vlib_buffer_pool_put (vm, buffer_pool_index, queue, queue_size);
	  n_queue = 0;
	}

      if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  bi = next;
	  goto next_in_chain;
	}

      buffers++;
      n_buffers--;
    }

  if (n_queue)
    vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
}


/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 1);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 0);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free_inline (vm, &buffer_index, 1, /* maybe next */ 1);
}
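
/* Usage sketch: dropping every packet in a frame at once.  vlib_buffer_free
   walks each buffer chain, so tail segments do not need to be listed
   explicitly.  Assumes `vm` and `frame` are in scope inside a node function.

     u32 *from = vlib_frame_vector_args (frame);
     vlib_buffer_free (vm, from, frame->n_vectors);
*/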

/** \brief Free buffers from ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
			    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free (vm, ring + start, ring_size - start);
      vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
    }
}

/** \brief Free buffers from ring without freeing tail buffers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
				    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free_no_next (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
      vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
			  u32 n_data_bytes);

/* Define vlib_buffer and vnet_buffer flags bits preserved for copy/clone */
#define VLIB_BUFFER_COPY_CLONE_FLAGS_MASK			\
  (VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID |	\
   VLIB_BUFFER_IS_TRACED | ~VLIB_BUFFER_FLAGS_ALL)

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_COPY_CLONE_FLAGS_MASK;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->trace_handle = s->trace_handle;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
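
/* Usage sketch: taking a private, writable copy of a packet (for example
   before modifying headers while the original is still queued elsewhere).
   A NULL return means the allocation failed and the original is untouched.

     vlib_buffer_t *orig = vlib_get_buffer (vm, bi);
     vlib_buffer_t *dup = vlib_buffer_copy (vm, orig);
     if (dup == 0)
       return;                                  // out of buffers, keep orig
     u32 dup_bi = vlib_get_buffer_index (vm, dup);
*/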

/* duplicate first buffer in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
{
  vlib_buffer_t *d;

  if ((vlib_buffer_alloc (vm, di, 1)) != 1)
    return 0;

  d = vlib_get_buffer (vm, *di);
  /* 1st segment */
  d->current_data = b->current_data;
  d->current_length = b->current_length;
  clib_memcpy_fast (d->opaque, b->opaque, sizeof (b->opaque));
  clib_memcpy_fast (d->opaque2, b->opaque2, sizeof (b->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (b), b->current_length);

  return d;
}

/* \brief Move packet from current position to offset position in buffer.
   Only works for a small packet using one buffer with room to fit the move
   @param vm - (vlib_main_t *) vlib main data structure pointer
   @param b -  (vlib_buffer_t *) pointer to buffer
   @param offset - (i16) position to move the packet in buffer
 */
always_inline void
vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT (offset + b->current_length <
	  vlib_buffer_get_default_data_size (vm));

  u8 *source = vlib_buffer_get_current (b);
  b->current_data = offset;
  u8 *destination = vlib_buffer_get_current (b);
  u16 length = b->current_length;

  if (source + length <= destination)	/* no overlap */
    clib_memcpy_fast (destination, source, length);
  else
    memmove (destination, source, length);
}

/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset, i16 offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->ref_count == 1);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT ((offset + head_end_offset) <
	  vlib_buffer_get_default_data_size (vm));

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      if (offset)
	vlib_buffer_move (vm, s, offset);

      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  n_buffers = vlib_buffer_alloc_from_pool (vm, buffers, n_buffers,
					   s->buffer_pool_index);

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      if (offset)
	d->current_data = offset;
      else
	d->current_data = s->current_data;

      d->current_length = head_end_offset;
      ASSERT (d->buffer_pool_index == s->buffer_pool_index);

      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      d->flags = (s->flags & VLIB_BUFFER_COPY_CLONE_FLAGS_MASK) |
	VLIB_BUFFER_NEXT_PRESENT;
      d->trace_handle = s->trace_handle;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->ref_count = n_buffers ? n_buffers : s->ref_count;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->ref_count = n_buffers ? n_buffers : s->ref_count;
    }

  return n_buffers;
}

/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
			     u16 n_buffers, u16 head_end_offset, i16 offset)
{
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
  u16 n_cloned = 0;

  while (n_buffers > 256)
    {
      vlib_buffer_t *copy;
      copy = vlib_buffer_copy (vm, s);
      n_cloned += vlib_buffer_clone_256 (vm,
					 vlib_get_buffer_index (vm, copy),
					 (buffers + n_cloned),
					 256, head_end_offset, offset);
      n_buffers -= 256;
    }
  n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
				     buffers + n_cloned,
				     n_buffers, head_end_offset, offset);

  return n_cloned;
}

/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u16 n_buffers, u16 head_end_offset)
{
  return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
				      head_end_offset, 0);
}
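
/* Usage sketch: replicating one packet to several destinations by cloning.
   Each clone gets a private copy of the first head_end_offset bytes (the
   headers to be rewritten) and shares the rest of the payload by reference.
   The counts below are illustrative.

     u32 clones[8];
     u16 n = vlib_buffer_clone (vm, bi, clones, 8, 64);  // 64 private header bytes
     // n may be < 8 if buffers ran out; clones[0..n-1] are ready to enqueue
*/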
1289
Damjan Marionc47ed032017-01-25 14:18:03 +01001290/** \brief Attach cloned tail to the buffer
1291
1292 @param vm - (vlib_main_t *) vlib main data structure pointer
1293 @param head - (vlib_buffer_t *) head buffer
1294 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
1295*/
1296
1297always_inline void
1298vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
1299 vlib_buffer_t * tail)
1300{
1301 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion910d3692019-01-21 11:48:34 +01001302 ASSERT (head->buffer_pool_index == tail->buffer_pool_index);
Damjan Marionc47ed032017-01-25 14:18:03 +01001303
1304 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
1305 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
1306 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
1307 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
1308 head->next_buffer = vlib_get_buffer_index (vm, tail);
1309 head->total_length_not_including_first_buffer = tail->current_length +
1310 tail->total_length_not_including_first_buffer;
1311
1312next_segment:
Damjan Marion910d3692019-01-21 11:48:34 +01001313 clib_atomic_add_fetch (&tail->ref_count, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +01001314
1315 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
1316 {
1317 tail = vlib_get_buffer (vm, tail->next_buffer);
1318 goto next_segment;
1319 }
1320}
1321
Pierre Pfister328e99b2016-02-12 13:18:42 +00001322/* Initializes the buffer as an empty packet with no chained buffers. */
1323always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -04001324vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +00001325{
1326 first->total_length_not_including_first_buffer = 0;
1327 first->current_length = 0;
1328 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1329 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +00001330}
1331
1332/* The provided next_bi buffer index is appended to the end of the packet. */
1333always_inline vlib_buffer_t *
Eyal Barib688fb12018-11-12 16:13:49 +02001334vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +00001335{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001336 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001337 last->next_buffer = next_bi;
1338 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
1339 next_buffer->current_length = 0;
1340 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +00001341 return next_buffer;
1342}
1343
1344/* Increases or decreases the packet length.
1345 * It does not allocate or deallocate new buffers.
1346 * Therefore, the added length must be compatible
1347 * with the last buffer. */
1348always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -04001349vlib_buffer_chain_increase_length (vlib_buffer_t * first,
1350 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +00001351{
1352 last->current_length += len;
1353 if (first != last)
1354 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +00001355}
1356
 1357/* Copies data to the end of the packet and increases its length.
1358 * It does not allocate new buffers.
1359 * Returns the number of copied bytes. */
1360always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -04001361vlib_buffer_chain_append_data (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001362 vlib_buffer_t * first,
1363 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +00001364{
Damjan Marion8934a042019-02-09 23:29:26 +01001365 u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001366 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
1367 u16 len = clib_min (data_len,
1368 n_buffer_bytes - last->current_length -
1369 last->current_data);
Dave Barach178cf492018-11-13 16:34:13 -05001370 clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
1371 data, len);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001372 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001373 return len;
1374}
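
/* Usage sketch (illustrative; 'vm', the pre-allocated buffer indices 'bi'
 * and 'next_bi', and 'payload'/'payload_len' are assumptions): build a
 * chained packet by hand. vlib_buffer_chain_append_data() copies only what
 * fits into the current last buffer, so the caller chains another buffer
 * and appends the remainder when the return value falls short.
 *
 *   vlib_buffer_t *first = vlib_get_buffer (vm, bi);
 *   vlib_buffer_t *last = first;
 *   vlib_buffer_chain_init (first);
 *   u16 copied = vlib_buffer_chain_append_data (vm, first, last,
 *                                               payload, payload_len);
 *   if (copied < payload_len)
 *     {
 *       last = vlib_buffer_chain_buffer (vm, last, next_bi);
 *       vlib_buffer_chain_append_data (vm, first, last,
 *                                      (u8 *) payload + copied,
 *                                      payload_len - copied);
 *     }
 */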
1375
 1376/* Copies data to the end of the packet and increases its length.
1377 * Allocates additional buffers from the free list if necessary.
1378 * Returns the number of copied bytes.
 1379 * The 'last' value is updated whenever new buffers are allocated and
 1380 * chained; on return it points to the last buffer in the chain. */
1381u16
Dave Barach9b8ffd92016-07-08 08:13:45 -04001382vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001383 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +01001384 vlib_buffer_t ** last, void *data,
1385 u16 data_len);
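
/* Usage sketch (illustrative; 'vm', 'bi', 'payload' and 'payload_len' are
 * assumptions): the _with_alloc variant takes 'last' by reference so it can
 * grow the chain itself; a return value smaller than 'payload_len' signals
 * that buffer allocation failed part way.
 *
 *   vlib_buffer_t *first = vlib_get_buffer (vm, bi);
 *   vlib_buffer_t *last = first;
 *   vlib_buffer_chain_init (first);
 *   u16 copied = vlib_buffer_chain_append_data_with_alloc (vm, first, &last,
 *                                                          payload,
 *                                                          payload_len);
 */
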
Dave Barach9b8ffd92016-07-08 08:13:45 -04001386void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001387
Dave Barach9b8ffd92016-07-08 08:13:45 -04001388format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
Benoît Ganne43543172019-10-21 15:13:54 +02001389 format_vlib_buffer_contents, format_vlib_buffer_no_chain;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001390
Dave Barach9b8ffd92016-07-08 08:13:45 -04001391typedef struct
1392{
Ed Warnickecb9cada2015-12-08 15:45:58 -07001393 /* Vector of packet data. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001394 u8 *packet_data;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001395
Damjan Mariond1274cb2018-03-13 21:32:17 +01001396 /* Number of buffers to allocate in each call to the allocator. */
1397 u32 min_n_buffers_each_alloc;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001398
Damjan Marion671e60e2018-12-30 18:09:59 +01001399 u8 *name;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001400} vlib_packet_template_t;
1401
Ed Warnickecb9cada2015-12-08 15:45:58 -07001402void vlib_packet_template_init (vlib_main_t * vm,
1403 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001404 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001405 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001406 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001407 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001408
Dave Barach9b8ffd92016-07-08 08:13:45 -04001409void *vlib_packet_template_get_packet (vlib_main_t * vm,
1410 vlib_packet_template_t * t,
1411 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001412
1413always_inline void
1414vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
1415{
1416 vec_free (t->packet_data);
1417}
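
/* Usage sketch (illustrative; the pre-built 'probe_hdr' byte array, the
 * chunk size of 16 passed as min_n_buffers_each_alloc and the template name
 * are assumptions): a packet template seeds buffers with a fixed header so
 * periodically re-sent packets can be stamped out without rebuilding the
 * header each time.
 *
 *   static vlib_packet_template_t tmpl;
 *   u32 bi;
 *   void *p;
 *
 *   vlib_packet_template_init (vm, &tmpl, probe_hdr, sizeof (probe_hdr),
 *                              16, "probe-template");
 *   p = vlib_packet_template_get_packet (vm, &tmpl, &bi);
 *
 * On success 'p' points at the packet data and 'bi' names a buffer holding
 * a copy of the template; vlib_packet_template_free() releases the
 * template's packet_data vector once it is no longer needed.
 */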
1418
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001419always_inline u32
1420vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
Klement Sekera75e7d132017-09-20 08:26:30 +02001421{
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001422 return b->data + vlib_buffer_get_default_data_size (vm) -
1423 ((u8 *) vlib_buffer_get_current (b) + b->current_length);
Klement Sekera75e7d132017-09-20 08:26:30 +02001424}
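
/* Usage sketch (illustrative; 'vm', 'b', 'data' and 'len' are assumptions):
 * check the remaining tail room before appending in place.
 *
 *   if (vlib_buffer_space_left_at_end (vm, b) >= len)
 *     {
 *       clib_memcpy_fast (vlib_buffer_get_tail (b), data, len);
 *       b->current_length += len;
 *     }
 */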
1425
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001426always_inline u32
1427vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
Eyal Barid3d42412018-11-05 13:29:25 +02001428{
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001429 vlib_buffer_t *db = b, *sb, *first = b;
1430 int is_cloned = 0;
1431 u32 bytes_left = 0, data_size;
1432 u16 src_left, dst_left, n_buffers = 1;
1433 u8 *dp, *sp;
1434 u32 to_free = 0;
Eyal Barid3d42412018-11-05 13:29:25 +02001435
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001436 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
1437 return 1;
Eyal Barid3d42412018-11-05 13:29:25 +02001438
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001439 data_size = vlib_buffer_get_default_data_size (vm);
1440
1441 dst_left = vlib_buffer_space_left_at_end (vm, b);
1442
1443 while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
Eyal Barid3d42412018-11-05 13:29:25 +02001444 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001445 b = vlib_get_buffer (vm, b->next_buffer);
1446 if (b->ref_count > 1)
1447 is_cloned = 1;
1448 bytes_left += b->current_length;
1449 n_buffers++;
Eyal Barid3d42412018-11-05 13:29:25 +02001450 }
1451
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001452 /* if the buffer is cloned, create a completely new chain - unless everything fits
1453 * into one buffer */
1454 if (is_cloned && bytes_left >= dst_left)
Eyal Barid3d42412018-11-05 13:29:25 +02001455 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001456 u32 len = 0;
1457 u32 space_needed = bytes_left - dst_left;
1458 u32 tail;
1459
1460 if (vlib_buffer_alloc (vm, &tail, 1) == 0)
1461 return 0;
1462
1463 ++n_buffers;
1464 len += data_size;
1465 b = vlib_get_buffer (vm, tail);
1466
1467 while (len < space_needed)
Eyal Barid3d42412018-11-05 13:29:25 +02001468 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001469 u32 bi;
1470 if (vlib_buffer_alloc (vm, &bi, 1) == 0)
1471 {
1472 vlib_buffer_free_one (vm, tail);
1473 return 0;
1474 }
1475 b->flags = VLIB_BUFFER_NEXT_PRESENT;
1476 b->next_buffer = bi;
1477 b = vlib_get_buffer (vm, bi);
1478 len += data_size;
1479 n_buffers++;
1480 }
1481 sb = vlib_get_buffer (vm, first->next_buffer);
1482 to_free = first->next_buffer;
1483 first->next_buffer = tail;
1484 }
1485 else
1486 sb = vlib_get_buffer (vm, first->next_buffer);
1487
1488 src_left = sb->current_length;
1489 sp = vlib_buffer_get_current (sb);
1490 dp = vlib_buffer_get_tail (db);
1491
1492 while (bytes_left)
1493 {
1494 u16 bytes_to_copy;
1495
1496 if (dst_left == 0)
1497 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001498 db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
1499 ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
1500 db = vlib_get_buffer (vm, db->next_buffer);
1501 dst_left = data_size;
Klement Sekerac09b7fd2019-06-04 21:14:26 +02001502 if (db->current_data > 0)
1503 {
1504 db->current_data = 0;
1505 }
1506 else
1507 {
1508 dst_left += -db->current_data;
1509 }
1510 dp = vlib_buffer_get_current (db);
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001511 }
1512
1513 while (src_left == 0)
1514 {
1515 ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
1516 sb = vlib_get_buffer (vm, sb->next_buffer);
1517 src_left = sb->current_length;
1518 sp = vlib_buffer_get_current (sb);
1519 }
1520
1521 bytes_to_copy = clib_min (dst_left, src_left);
1522
1523 if (dp != sp)
1524 {
1525 if (sb == db)
1526 bytes_to_copy = clib_min (bytes_to_copy, sp - dp);
1527
1528 clib_memcpy_fast (dp, sp, bytes_to_copy);
1529 }
1530
1531 src_left -= bytes_to_copy;
1532 dst_left -= bytes_to_copy;
1533 dp += bytes_to_copy;
1534 sp += bytes_to_copy;
1535 bytes_left -= bytes_to_copy;
1536 }
1537 if (db != first)
1538 db->current_data = 0;
1539 db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
1540
1541 if (is_cloned && to_free)
1542 vlib_buffer_free_one (vm, to_free);
1543 else
1544 {
1545 if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
1546 vlib_buffer_free_one (vm, db->next_buffer);
1547 db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1548 b = first;
1549 n_buffers = 1;
1550 while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1551 {
1552 b = vlib_get_buffer (vm, b->next_buffer);
1553 ++n_buffers;
Eyal Barid3d42412018-11-05 13:29:25 +02001554 }
1555 }
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001556
1557 first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
1558
1559 return n_buffers;
Eyal Barid3d42412018-11-05 13:29:25 +02001560}
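
/* Usage sketch (illustrative; 'vm' and 'b' are assumptions): code that must
 * see the packet data contiguously (e.g. parsing across segment boundaries)
 * can compact the chain first. The return value is the number of buffers in
 * the resulting chain; 0 means buffer allocation failed, in which case the
 * packet would typically be dropped.
 *
 *   u32 n = vlib_buffer_chain_linearize (vm, b);
 *   if (n == 0)
 *     vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
 */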
1561
Ed Warnickecb9cada2015-12-08 15:45:58 -07001562#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001563
1564/*
1565 * fd.io coding-style-patch-verification: ON
1566 *
1567 * Local Variables:
1568 * eval: (c-set-style "gnu")
1569 * End:
1570 */