blob: 9095bd3387fdbf3005d1681962ffb66e7d10c9b4 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
Dave Barachc3a06552018-10-01 09:25:32 -040044#include <vppinfra/fifo.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045
46/** \file
47 vlib buffer access methods.
48*/
49
50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond50e3472019-01-20 00:03:56 +010060 vlib_buffer_main_t *bm = vm->buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
Damjan Marione58041f2019-01-18 19:56:09 +010067static_always_inline void
Damjan Marion64d557c2019-01-18 20:03:41 +010068vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
69{
70 clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
71}
72
/* Initialize buffer metadata from a template buffer: copies everything up
   to (but not including) the template_end marker field of vlib_buffer_t. */
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
{
  clib_memcpy_fast (b, bt, STRUCT_OFFSET_OF (vlib_buffer_t, template_end));
}
78
Damjan Marionafe56de2018-05-17 12:44:00 +020079/** \brief Translate array of buffer indices into buffer pointers with offset
80
81 @param vm - (vlib_main_t *) vlib main data structure pointer
82 @param bi - (u32 *) array of buffer indices
83 @param b - (void **) array to store buffer pointers
84 @param count - (uword) number of elements
85 @param offset - (i32) offset applied to each pointer
86*/
87static_always_inline void
88vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
89 i32 offset)
90{
Damjan Mariond50e3472019-01-20 00:03:56 +010091 uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
Damjan Marionafe56de2018-05-17 12:44:00 +020092#ifdef CLIB_HAVE_VEC256
Damjan Mariond50e3472019-01-20 00:03:56 +010093 u64x4 off = u64x4_splat (buffer_mem_start + offset);
Damjan Marionafe56de2018-05-17 12:44:00 +020094 /* if count is not const, compiler will not unroll while loop
95 se we maintain two-in-parallel variant */
96 while (count >= 8)
97 {
98 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
99 u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
100 /* shift and add to get vlib_buffer_t pointer */
101 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
102 u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
103 b += 8;
104 bi += 8;
105 count -= 8;
106 }
107#endif
108 while (count >= 4)
109 {
110#ifdef CLIB_HAVE_VEC256
111 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
112 /* shift and add to get vlib_buffer_t pointer */
113 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
Sirshak Das759226e2018-08-22 08:46:52 +0800114#elif defined (CLIB_HAVE_VEC128)
Damjan Mariond50e3472019-01-20 00:03:56 +0100115 u64x2 off = u64x2_splat (buffer_mem_start + offset);
Damjan Marion5df580e2018-07-27 01:47:57 +0200116 u32x4 bi4 = u32x4_load_unaligned (bi);
117 u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
Sirshak Das759226e2018-08-22 08:46:52 +0800118#if defined (__aarch64__)
119 u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
120#else
Damjan Marion5df580e2018-07-27 01:47:57 +0200121 bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
122 u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
Sirshak Das759226e2018-08-22 08:46:52 +0800123#endif
Damjan Marion5df580e2018-07-27 01:47:57 +0200124 u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
125 u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
Damjan Marionafe56de2018-05-17 12:44:00 +0200126#else
127 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
128 b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
129 b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
130 b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
131#endif
132 b += 4;
133 bi += 4;
134 count -= 4;
135 }
136 while (count)
137 {
138 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
139 b += 1;
140 bi += 1;
141 count -= 1;
142 }
143}
144
/** \brief Translate array of buffer indices into buffer pointers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  /* zero offset: resulting pointers address the vlib_buffer_t itself */
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}
158
Ed Warnickecb9cada2015-12-08 15:45:58 -0700159/** \brief Translate buffer pointer into buffer index
160
161 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400162 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700163 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400164*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200165
Ed Warnickecb9cada2015-12-08 15:45:58 -0700166always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400167vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700168{
Damjan Mariond50e3472019-01-20 00:03:56 +0100169 vlib_buffer_main_t *bm = vm->buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200170 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
171 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
172 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400173 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700174 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
175}
176
/** \brief Translate array of buffer pointers into buffer indices with offset

    Inverse of vlib_get_buffers_with_offset: index is
    ((ptr - offset) - mem_start) >> log2_cacheline. AVX2 path converts 8
    pointers per iteration; scalar loops handle the remainder.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* fold the offset subtraction into the base so one subtract suffices */
  u64x4 off4 = u64x4_splat (vm->buffer_main->buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
238
/** \brief Translate array of buffer pointers into buffer indices

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  /* zero offset: pointers address the vlib_buffer_t directly */
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}
252
Ed Warnickecb9cada2015-12-08 15:45:58 -0700253/** \brief Get next buffer in buffer linklist, or zero for end of list.
254
255 @param vm - (vlib_main_t *) vlib main data structure pointer
256 @param b - (void *) buffer pointer
257 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400258*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700259always_inline vlib_buffer_t *
260vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
261{
262 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400263 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700264}
265
/* Slow path: walks the whole chain summing current_length per segment. */
uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    Fast paths: single buffer, or chain with a valid cached total; otherwise
    falls back to walking the chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  /* common case: no chained segments */
  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  /* chained, but the total is cached on the first buffer */
  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}
288
289/** \brief Get length in bytes of the buffer index buffer chain
290
291 @param vm - (vlib_main_t *) vlib main data structure pointer
292 @param bi - (u32) buffer index
293 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400294*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700295always_inline uword
296vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
297{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400298 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700299 return vlib_buffer_length_in_chain (vm, b);
300}
301
302/** \brief Copy buffer contents to memory
303
304 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400305 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700306 @param contents - (u8 *) memory, <strong>must be large enough</strong>
307 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400308*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700309always_inline uword
310vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
311{
312 uword content_len = 0;
313 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400314 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700315
316 while (1)
317 {
318 b = vlib_get_buffer (vm, buffer_index);
319 l = b->current_length;
Dave Barach178cf492018-11-13 16:34:13 -0500320 clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700321 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400322 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700323 break;
324 buffer_index = b->next_buffer;
325 }
326
327 return content_len;
328}
329
/* Physical address of the start of the buffer's data area. */
always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}
335
/* Physical address of the buffer's current data pointer (start of data
   area plus current_data adjustment). */
always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}
341
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
355
/* Allocation-tracking state of a buffer index, kept in a debug hash
   (see vlib_buffer_is_known / vlib_buffer_set_known_state below). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

/* Assert (in debug images) that every index in buffers is currently in
   expected_state. */
void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);
370
Ed Warnickecb9cada2015-12-08 15:45:58 -0700371always_inline vlib_buffer_known_state_t
Damjan Mariond50e3472019-01-20 00:03:56 +0100372vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700373{
Damjan Mariond50e3472019-01-20 00:03:56 +0100374 vlib_buffer_main_t *bm = vm->buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700375
Damjan Marion6b0f5892017-07-27 04:01:24 -0400376 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400377 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400378 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700379 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
380}
381
/* Record the known-state of a buffer index in the tracking hash,
   serialized by the buffer-known-hash spinlock. */
always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm, u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
392
/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

/* Round a byte size up to a multiple of sizeof (vlib_buffer_t). */
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700403
/** \brief Allocate buffers from specific freelist into supplied array

    Pops indices off the tail of the free list vector, refilling the list
    via the registered callback when it runs short.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_free_list_t *fl;
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      /* try to refill; if nothing could be added, give up */
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      vlib_buffer_copy_indices (buffers, src, n_buffers);
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* fast path: free list already holds enough indices */
  src = fl->buffers + len - n_buffers;
  vlib_buffer_copy_indices (buffers, src, n_buffers);
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
462
/** \brief Allocate buffers into supplied array

    Convenience wrapper over vlib_buffer_alloc_from_free_list using the
    default free list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700477
Damjan Marionc58408c2018-01-18 14:54:04 +0100478/** \brief Allocate buffers into ring
479
480 @param vm - (vlib_main_t *) vlib main data structure pointer
481 @param buffers - (u32 * ) buffer index ring
482 @param start - (u32) first slot in the ring
483 @param ring_size - (u32) ring size
484 @param n_buffers - (u32) number of buffers requested
485 @return - (u32) number of buffers actually allocated, may be
486 less than the number requested or zero
487*/
488always_inline u32
489vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
490 u32 ring_size, u32 n_buffers)
491{
492 u32 n_alloc;
493
494 ASSERT (n_buffers <= ring_size);
495
496 if (PREDICT_TRUE (start + n_buffers <= ring_size))
497 return vlib_buffer_alloc (vm, ring + start, n_buffers);
498
499 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
500
501 if (PREDICT_TRUE (n_alloc == ring_size - start))
502 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
503
504 return n_alloc;
505}
506
/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    Dispatches to the buffer-manager callback registered in
    vm->buffer_main.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700528
/** \brief Free buffers, does not free the buffer chain for each buffer

    Like vlib_buffer_free, but chained (next_buffer) segments are left
    untouched; only the listed indices are freed via the registered
    callback.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700549
/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
561
Damjan Mariona3731492018-02-25 22:50:39 +0100562/** \brief Free buffers from ring
563
564 @param vm - (vlib_main_t *) vlib main data structure pointer
565 @param buffers - (u32 * ) buffer index ring
566 @param start - (u32) first slot in the ring
567 @param ring_size - (u32) ring size
568 @param n_buffers - (u32) number of buffers
569*/
570always_inline void
571vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
572 u32 ring_size, u32 n_buffers)
573{
574 ASSERT (n_buffers <= ring_size);
575
576 if (PREDICT_TRUE (start + n_buffers <= ring_size))
577 {
578 vlib_buffer_free (vm, ring + start, n_buffers);
579 }
580 else
581 {
582 vlib_buffer_free (vm, ring + start, ring_size - start);
583 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
584 }
585}
586
Damjan Marioncef1db92018-03-28 18:27:38 +0200587/** \brief Free buffers from ring without freeing tail buffers
588
589 @param vm - (vlib_main_t *) vlib main data structure pointer
590 @param buffers - (u32 * ) buffer index ring
591 @param start - (u32) first slot in the ring
592 @param ring_size - (u32) ring size
593 @param n_buffers - (u32) number of buffers
594*/
595always_inline void
596vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
597 u32 ring_size, u32 n_buffers)
598{
599 ASSERT (n_buffers <= ring_size);
600
601 if (PREDICT_TRUE (start + n_buffers <= ring_size))
602 {
Damjan Marion4a973932018-06-09 19:29:16 +0200603 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200604 }
605 else
606 {
607 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
608 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
609 }
610}
Damjan Mariona3731492018-02-25 22:50:39 +0100611
/* Add/delete buffer free lists. */
vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
							    u32 n_data_bytes,
							    char *fmt, ...);

/* Delete a free list via the registered buffer-manager callback. */
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700626
/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

/* Look up a free list by index in the per-vm free list pool. */
always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm,
			   vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}
646
647always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100648vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
649 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700650{
Damjan Mariondac03522018-02-01 15:30:13 +0100651 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700652 return f->n_data_bytes;
653}
654
/* Append given data to end of buffer, possibly allocating new buffers. */
int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
			  u32 n_data_bytes);

/* duplicate all buffers in chain
   Allocates a fresh buffer per segment, copies payload, opaque/opaque2
   metadata (first segment only) and the NEXT_PRESENT/TOTAL_LENGTH_VALID
   flag bits. Returns pointer to the first new buffer, or 0 when the
   allocation cannot be fully satisfied (partial allocations are freed).
   NOTE(review): new_buffers is a VLA sized by chain length — assumes
   chains are short enough not to threaten the stack; confirm callers. */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* count segments in the source chain */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
716
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* Cloning a buffer that is already referenced by clones is not
     supported; the source must have no extra references. */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      /* Source is too small for the head/tail split to pay off: make
         full copies instead.  Slot 0 reuses the source buffer itself. */
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;		/* allocation failure: partial result */
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  if (PREDICT_FALSE (n_buffers == 1))
    {
      /* Single clone requested: hand back the source unchanged. */
      buffers[0] = src_buffer;
      return 1;
    }

  /* Allocate one head buffer per clone; may return fewer than asked,
     in which case only that many clones are produced. */
  n_buffers = vlib_buffer_alloc (vm, buffers, n_buffers);

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      /* The clone's tail is everything in the source past the head. */
      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      /* Each clone chains to the shared source buffer as its tail. */
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* Source buffer now holds only the shared tail data; every segment of
     the (possibly chained) source gets one extra reference per clone. */
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
793
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800794/** \brief Create multiple clones of buffer and store them
795 in the supplied array
796
797 @param vm - (vlib_main_t *) vlib main data structure pointer
798 @param src_buffer - (u32) source buffer index
799 @param buffers - (u32 * ) buffer index array
800 @param n_buffers - (u16) number of buffer clones requested (<=256)
801 @param head_end_offset - (u16) offset relative to current position
802 where packet head ends
803 @return - (u16) number of buffers actually cloned, may be
804 less than the number requested or zero
805*/
806always_inline u16
807vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
808 u16 n_buffers, u16 head_end_offset)
809{
810 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
811 u16 n_cloned = 0;
812
813 while (n_buffers > 256)
814 {
815 vlib_buffer_t *copy;
816 copy = vlib_buffer_copy (vm, s);
817 n_cloned += vlib_buffer_clone_256 (vm,
818 vlib_get_buffer_index (vm, copy),
819 (buffers + n_cloned),
820 256, head_end_offset);
821 n_buffers -= 256;
822 }
823 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
824 buffers + n_cloned,
825 n_buffers, head_end_offset);
826
827 return n_cloned;
828}
829
Damjan Marionc47ed032017-01-25 14:18:03 +0100830/** \brief Attach cloned tail to the buffer
831
832 @param vm - (vlib_main_t *) vlib main data structure pointer
833 @param head - (vlib_buffer_t *) head buffer
834 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
835*/
836
837always_inline void
838vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
839 vlib_buffer_t * tail)
840{
841 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marionc47ed032017-01-25 14:18:03 +0100842
843 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
844 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
845 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
846 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
847 head->next_buffer = vlib_get_buffer_index (vm, tail);
848 head->total_length_not_including_first_buffer = tail->current_length +
849 tail->total_length_not_including_first_buffer;
850
851next_segment:
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000852 clib_atomic_add_fetch (&tail->n_add_refs, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +0100853
854 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
855 {
856 tail = vlib_get_buffer (vm, tail->next_buffer);
857 goto next_segment;
858 }
859}
860
Pierre Pfister328e99b2016-02-12 13:18:42 +0000861/* Initializes the buffer as an empty packet with no chained buffers. */
862always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400863vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000864{
865 first->total_length_not_including_first_buffer = 0;
866 first->current_length = 0;
867 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
868 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000869}
870
871/* The provided next_bi buffer index is appended to the end of the packet. */
872always_inline vlib_buffer_t *
Eyal Barib688fb12018-11-12 16:13:49 +0200873vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000874{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400875 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000876 last->next_buffer = next_bi;
877 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
878 next_buffer->current_length = 0;
879 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000880 return next_buffer;
881}
882
883/* Increases or decreases the packet length.
884 * It does not allocate or deallocate new buffers.
885 * Therefore, the added length must be compatible
886 * with the last buffer. */
887always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400888vlib_buffer_chain_increase_length (vlib_buffer_t * first,
889 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000890{
891 last->current_length += len;
892 if (first != last)
893 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000894}
895
896/* Copy data to the end of the packet and increases its length.
897 * It does not allocate new buffers.
898 * Returns the number of copied bytes. */
899always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400900vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100901 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400902 vlib_buffer_t * first,
903 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000904{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400905 u32 n_buffer_bytes =
906 vlib_buffer_free_list_buffer_size (vm, free_list_index);
907 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
908 u16 len = clib_min (data_len,
909 n_buffer_bytes - last->current_length -
910 last->current_data);
Dave Barach178cf492018-11-13 16:34:13 -0500911 clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
912 data, len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400913 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000914 return len;
915}
916
917/* Copy data to the end of the packet and increases its length.
918 * Allocates additional buffers from the free list if necessary.
919 * Returns the number of copied bytes.
920 * 'last' value is modified whenever new buffers are allocated and
921 * chained and points to the last buffer in the chain. */
922u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400923vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100924 vlib_buffer_free_list_index_t
925 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400926 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +0100927 vlib_buffer_t ** last, void *data,
928 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400929void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000930
Dave Barach9b8ffd92016-07-08 08:13:45 -0400931format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
932 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700933
/* Template for stamping out many copies of a pre-built packet. */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Cached buffer indices; presumably pre-filled with packet_data by the
     template helper — confirm against vlib_packet_template_get_packet(). */
  u32 *free_buffers;

  /* Template name (used for display/debugging). */
  u8 *name;
} vlib_packet_template_t;
949
950void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
951 vlib_packet_template_t * t);
952
953void vlib_packet_template_init (vlib_main_t * vm,
954 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400955 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700956 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +0100957 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400958 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700959
Dave Barach9b8ffd92016-07-08 08:13:45 -0400960void *vlib_packet_template_get_packet (vlib_main_t * vm,
961 vlib_packet_template_t * t,
962 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700963
/* Release the template's packet data vector.  Note: this frees only
 * packet_data; any buffer indices still held in t->free_buffers are
 * not released here. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
969
Ed Warnickecb9cada2015-12-08 15:45:58 -0700970/* Set a buffer quickly into "uninitialized" state. We want this to
971 be extremely cheap and arrange for all fields that need to be
972 initialized to be in the first 128 bits of the buffer. */
973always_inline void
Damjan Marionbd69a5f2017-02-05 23:44:42 +0100974vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700975 vlib_buffer_free_list_t * fl)
976{
Damjan Marionbd69a5f2017-02-05 23:44:42 +0100977 vlib_buffer_t *src = &fl->buffer_init_template;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700978
Damjan Marion19010202016-03-24 17:17:47 +0100979 /* Make sure vlib_buffer_t is cacheline aligned and sized */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400980 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
981 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
982 CLIB_CACHE_LINE_BYTES);
983 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
984 CLIB_CACHE_LINE_BYTES * 2);
Damjan Marion19010202016-03-24 17:17:47 +0100985
Ed Warnickecb9cada2015-12-08 15:45:58 -0700986 /* Make sure buffer template is sane. */
Damjan Marione58041f2019-01-18 19:56:09 +0100987 vlib_buffer_copy_template (dst, src);
Dave Barachf8690282017-03-01 11:38:02 -0500988
989 /* Not in the first 16 octets. */
990 dst->n_add_refs = src->n_add_refs;
991
Ed Warnickecb9cada2015-12-08 15:45:58 -0700992 /* Make sure it really worked. */
Dave Barachf8690282017-03-01 11:38:02 -0500993#define _(f) ASSERT (dst->f == src->f);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400994 _(current_data);
995 _(current_length);
996 _(flags);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700997#undef _
Florin Corasb2215d62017-08-01 16:56:58 -0700998 /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
999 /* total_length_not_including_first_buffer is not in the template anymore
1000 * so it may actually not zeroed for some buffers. One option is to
1001 * uncomment the line lower (comes at a cost), the other, is to just not
1002 * care */
1003 /* dst->total_length_not_including_first_buffer = 0; */
Damjan Marionc47ed032017-01-25 14:18:03 +01001004 ASSERT (dst->n_add_refs == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001005}
1006
Damjan Mariond50e3472019-01-20 00:03:56 +01001007static_always_inline vlib_buffer_pool_t *
1008vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index)
1009{
1010 vlib_buffer_main_t *bm = vm->buffer_main;
1011 return vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
1012}
1013
Ed Warnickecb9cada2015-12-08 15:45:58 -07001014always_inline void
Damjan Marion8a6a3b22017-01-17 14:12:42 +01001015vlib_buffer_add_to_free_list (vlib_main_t * vm,
1016 vlib_buffer_free_list_t * f,
1017 u32 buffer_index, u8 do_init)
1018{
Damjan Mariond50e3472019-01-20 00:03:56 +01001019 vlib_buffer_pool_t *bp = vlib_buffer_pool_get (vm, f->buffer_pool_index);
Damjan Marion8a6a3b22017-01-17 14:12:42 +01001020 vlib_buffer_t *b;
1021 b = vlib_get_buffer (vm, buffer_index);
1022 if (PREDICT_TRUE (do_init))
1023 vlib_buffer_init_for_free_list (b, f);
Damjan Marionbd69a5f2017-02-05 23:44:42 +01001024 vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
Damjan Marion6b0f5892017-07-27 04:01:24 -04001025
Damjan Marionb6a8ed72017-08-29 00:15:35 +02001026 if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
Damjan Marion6b0f5892017-07-27 04:01:24 -04001027 {
Damjan Mariond1274cb2018-03-13 21:32:17 +01001028 clib_spinlock_lock (&bp->lock);
Damjan Marion6b0f5892017-07-27 04:01:24 -04001029 /* keep last stored buffers, as they are more likely hot in the cache */
Damjan Mariond1274cb2018-03-13 21:32:17 +01001030 vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
Damjan Marionb6a8ed72017-08-29 00:15:35 +02001031 CLIB_CACHE_LINE_BYTES);
Damjan Marion6b0f5892017-07-27 04:01:24 -04001032 vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
Klement Sekera75e974b2017-11-09 09:12:12 +01001033 f->n_alloc -= VLIB_FRAME_SIZE;
Damjan Mariond1274cb2018-03-13 21:32:17 +01001034 clib_spinlock_unlock (&bp->lock);
Damjan Marion6b0f5892017-07-27 04:01:24 -04001035 }
Damjan Marion8a6a3b22017-01-17 14:12:42 +01001036}
1037
Ed Warnickecb9cada2015-12-08 15:45:58 -07001038#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001039extern u32 *vlib_buffer_state_validation_lock;
1040extern uword *vlib_buffer_state_validation_hash;
1041extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001042#endif
1043
Dave Barach9b8ffd92016-07-08 08:13:45 -04001044static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001045vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
1046{
1047#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001048 uword *p;
1049 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001050
1051 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1052
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001053 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001054 ;
1055
1056 p = hash_get (vlib_buffer_state_validation_hash, b);
1057
1058 /* If we don't know about b, declare it to be in the expected state */
1059 if (!p)
1060 {
1061 hash_set (vlib_buffer_state_validation_hash, b, expected);
1062 goto out;
1063 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001064
Ed Warnickecb9cada2015-12-08 15:45:58 -07001065 if (p[0] != expected)
1066 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001067 void cj_stop (void);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001068 u32 bi;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001069 vlib_main_t *vm = &vlib_global_main;
1070
1071 cj_stop ();
1072
Ed Warnickecb9cada2015-12-08 15:45:58 -07001073 bi = vlib_get_buffer_index (vm, b);
1074
1075 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001076 clib_warning ("%.6f buffer %llx (%d): %s, not %s",
1077 vlib_time_now (vm), bi,
1078 p[0] ? "busy" : "free", expected ? "busy" : "free");
1079 os_panic ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001080 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001081out:
1082 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001083 *vlib_buffer_state_validation_lock = 0;
1084 clib_mem_set_heap (oldheap);
1085#endif
1086}
1087
Dave Barach9b8ffd92016-07-08 08:13:45 -04001088static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001089vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
1090{
1091#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001092 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001093
1094 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1095
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001096 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001097 ;
1098
1099 hash_set (vlib_buffer_state_validation_hash, b, expected);
1100
Dave Barach9b8ffd92016-07-08 08:13:45 -04001101 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001102 *vlib_buffer_state_validation_lock = 0;
1103 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001104#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07001105}
1106
Klement Sekera75e7d132017-09-20 08:26:30 +02001107/** minimum data size of first buffer in a buffer chain */
1108#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1109
/**
 * @brief compress buffer chain in a way where the first buffer is at least
 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 * @param[in,out] discard_vector - vector of buffer indexes which were removed
 * from the chain
 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_t *free_list;

  free_list = pool_elt_at_index (vm->buffer_free_list_pool,
				 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /* cap the target size by the space actually available in 'first' */
  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  do
    {
      /* pull bytes from the second segment into 'first' */
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
			first->current_length,
			vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      /* consume the copied bytes from the head of 'second' */
      second->current_data += amount_to_copy;
      second->current_length -= amount_to_copy;
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  /* bytes moved into 'first' no longer count toward the tail total */
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      if (!second->current_length)
	{
	  /* 'second' drained completely: unlink it, queue it for discard */
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
1170
/**
 * @brief linearize buffer chain - the first buffer is filled, if needed,
 * buffers are allocated and filled, returns free space in last buffer or
 * negative on failure
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 */
always_inline int
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
{
  vlib_buffer_t *b = first;
  vlib_buffer_free_list_t *fl =
    vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
  u32 buf_len = fl->n_data_bytes;
  // free buffer chain starting from the second buffer
  int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
  u32 chain_to_free = b->next_buffer;

  u32 len = vlib_buffer_length_in_chain (vm, b);
  u32 free_len = buf_len - b->current_data - b->current_length;
  int alloc_len = clib_max (len - free_len, 0);	//use the free len in the first buffer
  int n_buffers = (alloc_len + buf_len - 1) / buf_len;
  /* VLA sized to the number of extra buffers needed to hold all data */
  u32 new_buffers[n_buffers];

  u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
  if (n_alloc != n_buffers)
    {
      // allocation failed: release partial allocation, chain untouched
      vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
      return -1;
    }

  /* walk the old chain, packing each segment's data into 'b' and
   * chaining in a fresh buffer whenever 'b' fills up */
  vlib_buffer_t *s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      int d_free_len = buf_len - b->current_data - b->current_length;
      ASSERT (d_free_len >= 0);
      // chain buf and split write
      u32 copy_len = clib_min (d_free_len, s->current_length);
      u8 *d = vlib_buffer_put_uninit (b, copy_len);
      clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
      int rest = s->current_length - copy_len;
      if (rest > 0)
	{
	  //prev buf is full
	  ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
	  ASSERT (n_buffers > 0);
	  b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
	  //make full use of the new buffers
	  b->current_data = 0;
	  d = vlib_buffer_put_uninit (b, rest);
	  clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
	}
    }
  /* old tail segments are no longer referenced */
  vlib_buffer_free (vm, &chain_to_free, free_count);
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  if (b == first)		/* no buffers added */
    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  ASSERT (len == vlib_buffer_length_in_chain (vm, first));
  ASSERT (n_buffers == 0);
  return buf_len - b->current_data - b->current_length;
}
1234
Ed Warnickecb9cada2015-12-08 15:45:58 -07001235#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001236
1237/*
1238 * fd.io coding-style-patch-verification: ON
1239 *
1240 * Local Variables:
1241 * eval: (c-set-style "gnu")
1242 * End:
1243 */