/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
#include <vlib/buffer.h>
#include <vlib/physmem_funcs.h>
#include <vlib/main.h>
#include <vlib/node.h>

/** \file
    vlib buffer access methods.
*/

typedef void (vlib_buffer_enqueue_to_next_fn_t) (vlib_main_t *vm,
                                                 vlib_node_runtime_t *node,
                                                 u32 *buffers, u16 *nexts,
                                                 uword count);
typedef void (vlib_buffer_enqueue_to_single_next_fn_t) (
  vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
  u32 count);

typedef u32 (vlib_buffer_enqueue_to_thread_fn_t) (
  vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices,
  u16 *thread_indices, u32 n_packets, int drop_on_congestion);

typedef u32 (vlib_frame_queue_dequeue_fn_t) (vlib_main_t *vm,
                                             vlib_frame_queue_main_t *fqm);

typedef struct
{
  vlib_buffer_enqueue_to_next_fn_t *buffer_enqueue_to_next_fn;
  vlib_buffer_enqueue_to_single_next_fn_t *buffer_enqueue_to_single_next_fn;
  vlib_buffer_enqueue_to_thread_fn_t *buffer_enqueue_to_thread_fn;
  vlib_frame_queue_dequeue_fn_t *frame_queue_dequeue_fn;
} vlib_buffer_func_main_t;

extern vlib_buffer_func_main_t vlib_buffer_func_main;

always_inline void
vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  /* the reference count of an allocated buffer must always be 1 or higher */
  ASSERT (b->ref_count > 0);

  /* verify that the buffer pool index is valid */
  bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
  ASSERT (pointer_to_uword (b) >= bp->start);
  ASSERT (pointer_to_uword (b) < bp->start + bp->size -
          (bp->data_size + sizeof (vlib_buffer_t)));
}

always_inline void *
vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
                            uword offset)
{
  offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
}

/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b;

  b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
  vlib_buffer_validate (vm, b);
  return b;
}

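/* Usage sketch (illustrative, not part of this header): dereference one
   buffer index taken from a frame and look at its payload. `bi0' is a
   hypothetical index obtained from the frame vector.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     u8 *payload = vlib_buffer_get_current (b0);
     u16 len = b0->current_length;
*/
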
static_always_inline u32
vlib_buffer_get_default_data_size (vlib_main_t * vm)
{
  return vm->buffer_main->default_data_size;
}

static_always_inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
  clib_memcpy_u32 (dst, src, n_indices);
}

always_inline void
vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
                                    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (dst, ring + start, n);
      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
    }
}

always_inline void
vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
                                  u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (ring + start, src, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (ring + start, src, n);
      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
    }
}

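/* Usage sketch (illustrative, hypothetical driver state): drain entries
   from a wrapping ring into a linear array, letting the helper split the
   copy at the wrap point.

     u32 to_free[64];
     vlib_buffer_copy_indices_from_ring (to_free, txq->bufs,
                                         head & (txq->size - 1),
                                         txq->size, 64);
     vlib_buffer_free (vm, to_free, 64);
*/
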
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
{
#if defined CLIB_HAVE_VEC512
  b->as_u8x64[0] = bt->as_u8x64[0];
#elif defined (CLIB_HAVE_VEC256)
  b->as_u8x32[0] = bt->as_u8x32[0];
  b->as_u8x32[1] = bt->as_u8x32[1];
#elif defined (CLIB_HAVE_VEC128)
  b->as_u8x16[0] = bt->as_u8x16[0];
  b->as_u8x16[1] = bt->as_u8x16[1];
  b->as_u8x16[2] = bt->as_u8x16[2];
  b->as_u8x16[3] = bt->as_u8x16[3];
#else
  clib_memcpy_fast (b, bt, 64);
#endif
}

always_inline u8
vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
{
  ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
  return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
}

/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
                              i32 offset)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
#ifdef CLIB_HAVE_VEC512
  u64x8 of8 = u64x8_splat (buffer_mem_start + offset);
  u64x4 off = u64x8_extract_lo (of8);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 32)
    {
      u64x8 b0 = u64x8_from_u32x8 (u32x8_load_unaligned (bi));
      u64x8 b1 = u64x8_from_u32x8 (u32x8_load_unaligned (bi + 8));
      u64x8 b2 = u64x8_from_u32x8 (u32x8_load_unaligned (bi + 16));
      u64x8 b3 = u64x8_from_u32x8 (u32x8_load_unaligned (bi + 24));
      /* shift and add to get vlib_buffer_t pointer */
      u64x8_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + of8, b);
      u64x8_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + of8, b + 8);
      u64x8_store_unaligned ((b2 << CLIB_LOG2_CACHE_LINE_BYTES) + of8, b + 16);
      u64x8_store_unaligned ((b3 << CLIB_LOG2_CACHE_LINE_BYTES) + of8, b + 24);
      b += 32;
      bi += 32;
      count -= 32;
    }
  while (count >= 8)
    {
      u64x8 b0 = u64x8_from_u32x8 (u32x8_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x8_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + of8, b);
      b += 8;
      bi += 8;
      count -= 8;
    }
#elif defined CLIB_HAVE_VEC256
  u64x4 off = u64x4_splat (buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 32)
    {
      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 4));
      u64x4 b2 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 8));
      u64x4 b3 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 12));
      u64x4 b4 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 16));
      u64x4 b5 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 20));
      u64x4 b6 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 24));
      u64x4 b7 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 28));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      u64x4_store_unaligned ((b2 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 8);
      u64x4_store_unaligned ((b3 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 12);
      u64x4_store_unaligned ((b4 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 16);
      u64x4_store_unaligned ((b5 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 20);
      u64x4_store_unaligned ((b6 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 24);
      u64x4_store_unaligned ((b7 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 28);
      b += 32;
      bi += 32;
      count -= 32;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u64x2_from_u32x4 ((u32x4) bi4);
#if defined (__aarch64__)
      u64x2 b1 = u64x2_from_u32x4_high ((u32x4) bi4);
#else
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u64x2_from_u32x4 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
      b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
      b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  while (count)
    {
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b += 1;
      bi += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer indices into buffer pointers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}

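/* Usage sketch (illustrative): the usual node pattern translates the whole
   frame in one call, then walks the pointer array. `node' and `frame' are
   assumed to come from a node dispatch function.

     u32 *from = vlib_frame_vector_args (frame);
     u32 n_left = frame->n_vectors;
     vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

     vlib_get_buffers (vm, from, bufs, n_left);
     while (n_left > 0)
       {
         // ... process b[0] ...
         b++;
         n_left--;
       }
*/
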
/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}

/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
                                     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  u64x4 off4 = u64x4_splat (vm->buffer_main->buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer pointers into buffer indices

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
                         uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
          ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
                                             vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

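/* Usage sketch (illustrative): total packet length of a possibly chained
   buffer, e.g. when updating byte counters. `bi0' is a hypothetical index.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     uword n_bytes = vlib_buffer_length_in_chain (vm, b0);
*/
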
/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}

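/* Usage sketch (illustrative): linearize a chained packet into scratch
   memory sized from the chain length. `bi0' is a hypothetical index.

     uword n = vlib_buffer_index_length_in_chain (vm, bi0);
     u8 *scratch = clib_mem_alloc (n);
     vlib_buffer_contents (vm, bi0, scratch);
*/
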
always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}

always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of a buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)     \
  do {                                                  \
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);      \
    vlib_prefetch_buffer_header (_b, type);             \
  } while (0)

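/* Usage sketch (illustrative): prefetch buffer headers a few packets ahead
   of the ones being processed, the usual pipelining trick in node loops.
   `from' and `n_left' are hypothetical frame state.

     while (n_left >= 4)
       {
         vlib_prefetch_buffer_with_index (vm, from[2], STORE);
         vlib_prefetch_buffer_with_index (vm, from[3], STORE);
         // ... process from[0] and from[1] ...
         from += 2;
         n_left -= 2;
       }
*/
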
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
                                      uword n_buffers,
                                      vlib_buffer_known_state_t
                                      expected_state);

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
                          uword follow_chain);

u8 *vlib_validate_buffers (vlib_main_t * vm,
                           u32 * buffers,
                           uword next_buffer_stride,
                           uword n_buffers,
                           vlib_buffer_known_state_t known_state,
                           uword follow_buffer_next);

static_always_inline vlib_buffer_pool_t *
vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  return vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
}

static_always_inline __clib_warn_unused_result uword
vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
                      u32 n_buffers)
{
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  u32 len;

  ASSERT (bp->buffers);

  clib_spinlock_lock (&bp->lock);
  len = bp->n_avail;
  if (PREDICT_TRUE (n_buffers < len))
    {
      len -= n_buffers;
      vlib_buffer_copy_indices (buffers, bp->buffers + len, n_buffers);
      bp->n_avail = len;
      clib_spinlock_unlock (&bp->lock);
      return n_buffers;
    }
  else
    {
      vlib_buffer_copy_indices (buffers, bp->buffers, len);
      bp->n_avail = 0;
      clib_spinlock_unlock (&bp->lock);
      return len;
    }
}

/** \brief Allocate buffers from specific pool into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param buffer_pool_index - (u8) index of the buffer pool to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/

always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
                             u8 buffer_pool_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;
  vlib_buffer_pool_thread_t *bpt;
  u32 *src, *dst, len, n_left;

  /* If buffer allocation fault injection is configured */
  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
    {
      u32 vlib_buffer_alloc_may_fail (vlib_main_t *, u32);

      /* See how many buffers we're willing to allocate */
      n_buffers = vlib_buffer_alloc_may_fail (vm, n_buffers);
      if (n_buffers == 0)
        return (n_buffers);
    }

  bp = vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
  bpt = vec_elt_at_index (bp->threads, vm->thread_index);

  dst = buffers;
  n_left = n_buffers;
  len = bpt->n_cached;

  /* per-thread cache contains enough buffers */
  if (len >= n_buffers)
    {
      src = bpt->cached_buffers + len - n_buffers;
      vlib_buffer_copy_indices (dst, src, n_buffers);
      bpt->n_cached -= n_buffers;

      if (CLIB_DEBUG > 0)
        vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                         VLIB_BUFFER_KNOWN_FREE);
      return n_buffers;
    }

  /* alloc bigger than cache - take buffers directly from main pool */
  if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
    {
      n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
                                        n_buffers);

      if (CLIB_DEBUG > 0)
        vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                         VLIB_BUFFER_KNOWN_FREE);
      return n_buffers;
    }

  /* take everything available in the cache */
  if (len)
    {
      vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
      bpt->n_cached = 0;
      dst += len;
      n_left -= len;
    }

  len = round_pow2 (n_left, 32);
  len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
                              len);
  bpt->n_cached = len;

  if (len)
    {
      u32 n_copy = clib_min (len, n_left);
      src = bpt->cached_buffers + len - n_copy;
      vlib_buffer_copy_indices (dst, src, n_copy);
      bpt->n_cached -= n_copy;
      n_left -= n_copy;
    }

  n_buffers -= n_left;

  /* Verify that buffers are known free. */
  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                     VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}

/** \brief Allocate buffers from specific numa node into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param numa_node - (u32) numa node
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_on_numa (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
                           u32 numa_node)
{
  u8 index = vlib_buffer_pool_get_default_for_numa (vm, numa_node);
  return vlib_buffer_alloc_from_pool (vm, buffers, n_buffers, index);
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/

always_inline __clib_warn_unused_result u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_on_numa (vm, buffers, n_buffers, vm->numa_node);
}

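/* Usage sketch (illustrative): the allocator may return fewer buffers than
   requested, so callers must check the count and either use or return a
   partial allocation.

     u32 bis[256];
     u32 n_alloc = vlib_buffer_alloc (vm, bis, 256);
     if (n_alloc < 256)
       {
         // not enough buffers - give back what we did get
         vlib_buffer_free (vm, bis, n_alloc);
         return 0;
       }
*/
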
/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
                           u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}

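/* Usage sketch (illustrative, hypothetical driver state): refill the free
   tail of a wrapping rx descriptor ring in place, without linearizing it
   first.

     u32 n_refill = rxq->size - rxq->n_enqueued;
     u32 slot = (rxq->next + rxq->n_enqueued) & (rxq->size - 1);
     u32 n = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot,
                                        rxq->size, n_refill);
     rxq->n_enqueued += n;
*/
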
/** \brief Allocate buffers into ring from specific buffer pool

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
                                     u32 ring_size, u32 n_buffers,
                                     u8 buffer_pool_index)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc_from_pool (vm, ring + start, n_buffers,
                                        buffer_pool_index);

  n_alloc = vlib_buffer_alloc_from_pool (vm, ring + start, ring_size - start,
                                         buffer_pool_index);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc_from_pool (vm, ring, n_buffers - n_alloc,
                                            buffer_pool_index);

  return n_alloc;
}

static_always_inline void
vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
                      u32 * buffers, u32 n_buffers)
{
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
                                                     vm->thread_index);
  u32 n_cached, n_empty;

  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                     VLIB_BUFFER_KNOWN_ALLOCATED);

  n_cached = bpt->n_cached;
  n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
  if (n_buffers <= n_empty)
    {
      vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
                                buffers, n_buffers);
      bpt->n_cached = n_cached + n_buffers;
      return;
    }

  vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
                            buffers + n_buffers - n_empty, n_empty);
  bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;

  clib_spinlock_lock (&bp->lock);
  vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
                            n_buffers - n_empty);
  bp->n_avail += n_buffers - n_empty;
  clib_spinlock_unlock (&bp->lock);
}

static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
                         int maybe_next)
{
  const int queue_size = 128;
  vlib_buffer_pool_t *bp = 0;
  u8 buffer_pool_index = ~0;
  u32 n_queue = 0, queue[queue_size + 4];
  vlib_buffer_t bt = { };
#if defined(CLIB_HAVE_VEC128)
  vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
  vlib_buffer_t bpi_vec = { };
  vlib_buffer_t flags_refs_mask = {
    .flags = VLIB_BUFFER_NEXT_PRESENT,
    .ref_count = ~1
  };
#endif

  if (PREDICT_FALSE (n_buffers == 0))
    return;

  vlib_buffer_t *b = vlib_get_buffer (vm, buffers[0]);
  buffer_pool_index = b->buffer_pool_index;
  bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_copy_template (&bt, &bp->buffer_template);
#if defined(CLIB_HAVE_VEC128)
  bpi_vec.buffer_pool_index = buffer_pool_index;
#endif

  while (n_buffers)
    {
      vlib_buffer_t *b[8];
      u32 bi, sum = 0, flags, next;

      if (n_buffers < 4)
        goto one_by_one;

      vlib_get_buffers (vm, buffers, b, 4);

      if (n_buffers >= 12)
        {
          vlib_get_buffers (vm, buffers + 8, b + 4, 4);
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
        }

#if defined(CLIB_HAVE_VEC128)
      u8x16 p0, p1, p2, p3, r;
      p0 = u8x16_load_unaligned (b[0]);
      p1 = u8x16_load_unaligned (b[1]);
      p2 = u8x16_load_unaligned (b[2]);
      p3 = u8x16_load_unaligned (b[3]);

      r = p0 ^ bpi_vec.as_u8x16[0];
      r |= p1 ^ bpi_vec.as_u8x16[0];
      r |= p2 ^ bpi_vec.as_u8x16[0];
      r |= p3 ^ bpi_vec.as_u8x16[0];
      r &= bpi_mask.as_u8x16[0];
      r |= (p0 | p1 | p2 | p3) & flags_refs_mask.as_u8x16[0];

      sum = !u8x16_is_all_zero (r);
#else
      sum |= b[0]->flags;
      sum |= b[1]->flags;
      sum |= b[2]->flags;
      sum |= b[3]->flags;
      sum &= VLIB_BUFFER_NEXT_PRESENT;
      sum += b[0]->ref_count - 1;
      sum += b[1]->ref_count - 1;
      sum += b[2]->ref_count - 1;
      sum += b[3]->ref_count - 1;
      sum |= b[0]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[1]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[2]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[3]->buffer_pool_index ^ buffer_pool_index;
#endif

      if (sum)
        goto one_by_one;

      vlib_buffer_copy_indices (queue + n_queue, buffers, 4);
      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);
      n_queue += 4;

      vlib_buffer_validate (vm, b[0]);
      vlib_buffer_validate (vm, b[1]);
      vlib_buffer_validate (vm, b[2]);
      vlib_buffer_validate (vm, b[3]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (n_queue >= queue_size)
        {
          vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
          n_queue = 0;
        }
      buffers += 4;
      n_buffers -= 4;
      continue;

    one_by_one:
      bi = buffers[0];

    next_in_chain:
      b[0] = vlib_get_buffer (vm, bi);
      flags = b[0]->flags;
      next = b[0]->next_buffer;

      if (PREDICT_FALSE (buffer_pool_index != b[0]->buffer_pool_index))
        {
          if (n_queue)
            {
              vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
              n_queue = 0;
            }

          buffer_pool_index = b[0]->buffer_pool_index;
#if defined(CLIB_HAVE_VEC128)
          bpi_vec.buffer_pool_index = buffer_pool_index;
#endif
          bp = vlib_get_buffer_pool (vm, buffer_pool_index);
          vlib_buffer_copy_template (&bt, &bp->buffer_template);
        }

      vlib_buffer_validate (vm, b[0]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
        {
          vlib_buffer_copy_template (b[0], &bt);
          queue[n_queue++] = bi;
        }

      if (n_queue == queue_size)
        {
          vlib_buffer_pool_put (vm, buffer_pool_index, queue, queue_size);
          n_queue = 0;
        }

      if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          bi = next;
          goto next_in_chain;
        }

      buffers++;
      n_buffers--;
    }

  if (n_queue)
    vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
}

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
                  /* pointer to first buffer */
                  u32 * buffers,
                  /* number of buffers to free */
                  u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 1);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
                          /* pointer to first buffer */
                          u32 * buffers,
                          /* number of buffers to free */
                          u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 0);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free_inline (vm, &buffer_index, 1, /* maybe next */ 1);
}

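/* Usage sketch (illustrative): dropping a whole frame is a single call,
   since vlib_buffer_free () follows and frees each chain. A driver that
   recycles tail segments itself would use the _no_next variant instead.

     u32 *from = vlib_frame_vector_args (frame);
     vlib_buffer_free (vm, from, frame->n_vectors);
*/
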
/** \brief Free buffers from ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
                            u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free (vm, ring + start, ring_size - start);
      vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
    }
}

/** \brief Free buffers from ring without freeing tail buffers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
                                    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free_no_next (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
      vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
                          u32 n_data_bytes);

/* Define vlib_buffer and vnet_buffer flags bits preserved for copy/clone */
#define VLIB_BUFFER_COPY_CLONE_FLAGS_MASK                       \
  (VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID |  \
   VLIB_BUFFER_IS_TRACED | ~VLIB_BUFFER_FLAGS_ALL)

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_COPY_CLONE_FLAGS_MASK;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
        vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->trace_handle = s->trace_handle;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
                    vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
                        vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}

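/* Usage sketch (illustrative): deep-copy a packet before modifying the
   original, e.g. to mirror it to another interface. The copy can fail
   under buffer exhaustion, so the zero return must be handled. `bi0' is a
   hypothetical index.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     vlib_buffer_t *c0 = vlib_buffer_copy (vm, b0);
     if (c0 == 0)
       return; // out of buffers - skip mirroring
     u32 ci0 = vlib_get_buffer_index (vm, c0);
*/
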
/* duplicate first buffer in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
{
  vlib_buffer_t *d;

  if ((vlib_buffer_alloc (vm, di, 1)) != 1)
    return 0;

  d = vlib_get_buffer (vm, *di);
  /* 1st segment */
  d->current_data = b->current_data;
  d->current_length = b->current_length;
  clib_memcpy_fast (d->opaque, b->opaque, sizeof (b->opaque));
  clib_memcpy_fast (d->opaque2, b->opaque2, sizeof (b->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
                    vlib_buffer_get_current (b), b->current_length);

  return d;
}

/* \brief Move packet from current position to offset position in buffer.
   Only works for small packets that fit in one buffer with room for the move
   @param vm - (vlib_main_t *) vlib main data structure pointer
   @param b - (vlib_buffer_t *) pointer to buffer
   @param offset - (i16) position to move the packet in buffer
 */
always_inline void
vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT (offset + b->current_length <
          vlib_buffer_get_default_data_size (vm));

  u8 *source = vlib_buffer_get_current (b);
  b->current_data = offset;
  u8 *destination = vlib_buffer_get_current (b);
  u16 length = b->current_length;

  if (source + length <= destination)   /* no overlap */
    clib_memcpy_fast (destination, source, length);
  else
    memmove (destination, source, length);
}

/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
                       u16 n_buffers, u16 head_end_offset, i16 offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->ref_count == 1);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT ((offset + head_end_offset) <
          vlib_buffer_get_default_data_size (vm));

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      if (offset)
        vlib_buffer_move (vm, s, offset);

      for (i = 1; i < n_buffers; i++)
        {
          vlib_buffer_t *d;
          d = vlib_buffer_copy (vm, s);
          if (d == 0)
            return i;
          buffers[i] = vlib_get_buffer_index (vm, d);
        }
      return n_buffers;
    }

  if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  n_buffers = vlib_buffer_alloc_from_pool (vm, buffers, n_buffers,
                                           s->buffer_pool_index);

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      if (offset)
        d->current_data = offset;
      else
        d->current_data = s->current_data;

      d->current_length = head_end_offset;
      ASSERT (d->buffer_pool_index == s->buffer_pool_index);

      d->total_length_not_including_first_buffer = s->current_length -
        head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          d->total_length_not_including_first_buffer +=
            s->total_length_not_including_first_buffer;
        }
      d->flags = (s->flags & VLIB_BUFFER_COPY_CLONE_FLAGS_MASK) |
        VLIB_BUFFER_NEXT_PRESENT;
      d->trace_handle = s->trace_handle;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
                        vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->ref_count = n_buffers ? n_buffers : s->ref_count;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->ref_count = n_buffers ? n_buffers : s->ref_count;
    }

  return n_buffers;
}

/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
                             u16 n_buffers, u16 head_end_offset, i16 offset)
{
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
  u16 n_cloned = 0;

  while (n_buffers > 256)
    {
      vlib_buffer_t *copy;
      copy = vlib_buffer_copy (vm, s);
      n_cloned += vlib_buffer_clone_256 (vm,
                                         vlib_get_buffer_index (vm, copy),
                                         (buffers + n_cloned),
                                         256, head_end_offset, offset);
      n_buffers -= 256;
    }
  n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
                                     buffers + n_cloned,
                                     n_buffers, head_end_offset, offset);

  return n_cloned;
}

John Lof545caa2019-04-01 11:30:07 -04001314/** \brief Create multiple clones of buffer and store them
1315 in the supplied array
1316
1317 @param vm - (vlib_main_t *) vlib main data structure pointer
1318 @param src_buffer - (u32) source buffer index
1319 @param buffers - (u32 * ) buffer index array
1320 @param n_buffers - (u16) number of buffer clones requested (<=256)
1321 @param head_end_offset - (u16) offset relative to current position
1322 where packet head ends
1323 @return - (u16) number of buffers actually cloned, may be
1324 less than the number requested or zero
1325*/
1326always_inline u16
1327vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
1328 u16 n_buffers, u16 head_end_offset)
1329{
1330 return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
1331 head_end_offset, 0);
1332}
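
/* Usage sketch (illustrative only, not part of the API): replicate one
 * packet into two clones whose first hdr_len bytes are private and can be
 * rewritten independently, while the payload tail stays shared and
 * reference counted. Assumes hdr_len does not exceed the first segment's
 * current_length. */
always_inline u16
example_clone_packet_pair (vlib_main_t * vm, u32 src_bi, u32 clones[2],
			   u16 hdr_len)
{
  /* returns 2 on success, less if buffer allocation failed */
  return vlib_buffer_clone (vm, src_bi, clones, 2, hdr_len);
}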

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/
always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (head->buffer_pool_index == tail->buffer_pool_index);

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

  /* bump the reference count on every segment of the attached tail */
next_segment:
  clib_atomic_add_fetch (&tail->ref_count, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}
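
/* Usage sketch (illustrative, hypothetical helper): allocate a fresh head
 * buffer and attach an existing (possibly shared) packet as its cloned
 * tail. Assumes the packet lives in the default buffer pool, since
 * vlib_buffer_attach_clone() requires head and tail to share a pool. */
always_inline int
example_new_head_for_shared_tail (vlib_main_t * vm, u32 tail_bi,
				  u32 * head_bi)
{
  vlib_buffer_t *head, *tail;

  if (vlib_buffer_alloc (vm, head_bi, 1) != 1)
    return 0;

  head = vlib_get_buffer (vm, head_bi[0]);
  tail = vlib_get_buffer (vm, tail_bi);
  head->current_length = 0;	/* caller writes its private header here */
  vlib_buffer_attach_clone (vm, head, tail);
  return 1;
}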

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}
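
/* Usage sketch (illustrative, hypothetical helper): link a vector of
 * already-allocated buffer indices into a single empty packet chain. The
 * first index becomes the packet head; the returned pointer is the last
 * segment, ready for vlib_buffer_chain_append_data(). */
always_inline vlib_buffer_t *
example_chain_from_indices (vlib_main_t * vm, u32 * bis, u32 n_bis)
{
  vlib_buffer_t *first = vlib_get_buffer (vm, bis[0]);
  vlib_buffer_t *last = first;
  u32 i;

  vlib_buffer_chain_init (first);
  for (i = 1; i < n_bis; i++)
    last = vlib_buffer_chain_buffer (vm, last, bis[i]);
  return last;
}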

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must fit in the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
		    data, len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
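
/* Usage sketch (illustrative, hypothetical helper): fill a pre-built chain
 * (e.g. one from example_chain_from_indices() above) from a flat memory
 * region, advancing to the next pre-chained segment whenever the current
 * one is full. Returns the number of bytes actually copied, which is less
 * than data_len if the chain runs out of segments. */
always_inline u16
example_fill_chain (vlib_main_t * vm, vlib_buffer_t * first, u8 * data,
		    u16 data_len)
{
  vlib_buffer_t *last = first;
  u16 copied = 0;

  while (copied < data_len)
    {
      copied += vlib_buffer_chain_append_data (vm, first, last,
					       data + copied,
					       data_len - copied);
      if (copied == data_len)
	break;
      if ((last->flags & VLIB_BUFFER_NEXT_PRESENT) == 0)
	break;			/* out of pre-chained segments */
      last = vlib_get_buffer (vm, last->next_buffer);
    }
  return copied;
}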

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last, void *data,
					  u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents, format_vlib_buffer_no_chain;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  u8 *name;
} vlib_packet_template_t;

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
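
/* Usage sketch (illustrative, hypothetical helper): a template would have
 * been built once at init time, e.g. with
 *   vlib_packet_template_init (vm, &t, hdr, sizeof (hdr),
 *                              16 /+ min_n_buffers_each_alloc +/, "example");
 * then the data path stamps out pre-populated packets and patches
 * per-packet fields. Returns ~0 on allocation failure. */
always_inline u32
example_packet_from_template (vlib_main_t * vm, vlib_packet_template_t * t)
{
  u32 bi;
  void *p = vlib_packet_template_get_packet (vm, t, &bi);

  if (p == 0)
    return ~0;			/* buffer allocation failed */
  /* p points at the copied template data inside buffer bi; rewrite
   * per-packet fields (addresses, sequence numbers, checksums) here */
  return bi;
}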

/* Returns the number of tailroom bytes left in the buffer's data area. */
always_inline u32
vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
{
  return b->data + vlib_buffer_get_default_data_size (vm) -
    ((u8 *) vlib_buffer_get_current (b) + b->current_length);
}
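
/* Usage sketch (illustrative, hypothetical helper): bounds-check an
 * in-place tail write before appending data directly with
 * clib_memcpy_fast() and vlib_buffer_chain_increase_length(). */
always_inline int
example_can_append_in_place (vlib_main_t * vm, vlib_buffer_t * b, u32 len)
{
  return vlib_buffer_space_left_at_end (vm, b) >= len;
}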

/* Compacts a buffer chain into as few buffers as possible.
 * Returns the number of buffers in the resulting chain,
 * or 0 if a needed buffer allocation fails. */
always_inline u32
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *db = b, *sb, *first = b;
  int is_cloned = 0;
  u32 bytes_left = 0, data_size;
  u16 src_left, dst_left, n_buffers = 1;
  u8 *dp, *sp;
  u32 to_free = 0;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return 1;

  data_size = vlib_buffer_get_default_data_size (vm);

  dst_left = vlib_buffer_space_left_at_end (vm, b);

  /* walk the chain once: count segments, sum tail bytes, detect sharing */
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      if (b->ref_count > 1)
	is_cloned = 1;
      bytes_left += b->current_length;
      n_buffers++;
    }

  /* if buffer is cloned, create completely new chain - unless everything
   * fits into one buffer */
  if (is_cloned && bytes_left >= dst_left)
    {
      u32 len = 0;
      u32 space_needed = bytes_left - dst_left;
      u32 tail;

      if (vlib_buffer_alloc (vm, &tail, 1) == 0)
	return 0;

      ++n_buffers;
      len += data_size;
      b = vlib_get_buffer (vm, tail);

      /* allocate enough fresh buffers to hold the shared tail data */
      while (len < space_needed)
	{
	  u32 bi;
	  if (vlib_buffer_alloc (vm, &bi, 1) == 0)
	    {
	      vlib_buffer_free_one (vm, tail);
	      return 0;
	    }
	  b->flags = VLIB_BUFFER_NEXT_PRESENT;
	  b->next_buffer = bi;
	  b = vlib_get_buffer (vm, bi);
	  len += data_size;
	  n_buffers++;
	}
      sb = vlib_get_buffer (vm, first->next_buffer);
      to_free = first->next_buffer;
      first->next_buffer = tail;
    }
  else
    sb = vlib_get_buffer (vm, first->next_buffer);

  src_left = sb->current_length;
  sp = vlib_buffer_get_current (sb);
  dp = vlib_buffer_get_tail (db);

  /* compact source segments into destination segments */
  while (bytes_left)
    {
      u16 bytes_to_copy;

      if (dst_left == 0)
	{
	  db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
	  ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
	  db = vlib_get_buffer (vm, db->next_buffer);
	  dst_left = data_size;
	  /* reclaim any headroom in the destination buffer */
	  if (db->current_data > 0)
	    {
	      db->current_data = 0;
	    }
	  else
	    {
	      dst_left += -db->current_data;
	    }
	  dp = vlib_buffer_get_current (db);
	}

      while (src_left == 0)
	{
	  ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
	  sb = vlib_get_buffer (vm, sb->next_buffer);
	  src_left = sb->current_length;
	  sp = vlib_buffer_get_current (sb);
	}

      bytes_to_copy = clib_min (dst_left, src_left);

      if (dp != sp)
	{
	  /* when compacting in place, never copy past the read pointer */
	  if (sb == db)
	    bytes_to_copy = clib_min (bytes_to_copy, sp - dp);

	  clib_memcpy_fast (dp, sp, bytes_to_copy);
	}

      src_left -= bytes_to_copy;
      dst_left -= bytes_to_copy;
      dp += bytes_to_copy;
      sp += bytes_to_copy;
      bytes_left -= bytes_to_copy;
    }
  if (db != first)
    db->current_data = 0;
  db->current_length = dp - (u8 *) vlib_buffer_get_current (db);

  if (is_cloned && to_free)
    vlib_buffer_free_one (vm, to_free);
  else
    {
      /* free the now-unused segments past the last destination buffer
       * and recount the resulting chain */
      if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
	vlib_buffer_free_one (vm, db->next_buffer);
      db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
      b = first;
      n_buffers = 1;
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  ++n_buffers;
	}
    }

  first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  return n_buffers;
}
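
/* Usage sketch (illustrative, hypothetical helper): nodes that parse deep
 * headers often linearize chained packets first; a zero return means
 * buffer allocation failed, and this sketch simply drops the packet in
 * that case. */
always_inline int
example_linearize_or_drop (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);

  if (vlib_buffer_chain_linearize (vm, b) == 0)
    {
      vlib_buffer_free_one (vm, bi);
      return 0;			/* dropped */
    }
  return 1;
}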

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */