blob: cc72c57d5e8d4cb04c6a5d843a2d65d1a114c2c2 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
Dave Barachc3a06552018-10-01 09:25:32 -040044#include <vppinfra/fifo.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045
46/** \file
47 vlib buffer access methods.
48*/
49
50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond1274cb2018-03-13 21:32:17 +010060 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
Damjan Marione58041f2019-01-18 19:56:09 +010067static_always_inline void
Damjan Marion64d557c2019-01-18 20:03:41 +010068vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
69{
70 clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
71}
72
73static_always_inline void
Damjan Marione58041f2019-01-18 19:56:09 +010074vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
75{
76 clib_memcpy_fast (b, bt, STRUCT_OFFSET_OF (vlib_buffer_t, template_end));
77}
78
Damjan Marionafe56de2018-05-17 12:44:00 +020079/** \brief Translate array of buffer indices into buffer pointers with offset
80
81 @param vm - (vlib_main_t *) vlib main data structure pointer
82 @param bi - (u32 *) array of buffer indices
83 @param b - (void **) array to store buffer pointers
84 @param count - (uword) number of elements
85 @param offset - (i32) offset applied to each pointer
86*/
87static_always_inline void
88vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
89 i32 offset)
90{
91#ifdef CLIB_HAVE_VEC256
92 u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
93 /* if count is not const, compiler will not unroll while loop
94 se we maintain two-in-parallel variant */
95 while (count >= 8)
96 {
97 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
98 u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
99 /* shift and add to get vlib_buffer_t pointer */
100 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
101 u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
102 b += 8;
103 bi += 8;
104 count -= 8;
105 }
106#endif
107 while (count >= 4)
108 {
109#ifdef CLIB_HAVE_VEC256
110 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
111 /* shift and add to get vlib_buffer_t pointer */
112 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
Sirshak Das759226e2018-08-22 08:46:52 +0800113#elif defined (CLIB_HAVE_VEC128)
Damjan Marion5df580e2018-07-27 01:47:57 +0200114 u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
115 u32x4 bi4 = u32x4_load_unaligned (bi);
116 u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
Sirshak Das759226e2018-08-22 08:46:52 +0800117#if defined (__aarch64__)
118 u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
119#else
Damjan Marion5df580e2018-07-27 01:47:57 +0200120 bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
121 u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
Sirshak Das759226e2018-08-22 08:46:52 +0800122#endif
Damjan Marion5df580e2018-07-27 01:47:57 +0200123 u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
124 u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
Damjan Marionafe56de2018-05-17 12:44:00 +0200125#else
126 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
127 b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
128 b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
129 b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
130#endif
131 b += 4;
132 bi += 4;
133 count -= 4;
134 }
135 while (count)
136 {
137 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
138 b += 1;
139 bi += 1;
140 count -= 1;
141 }
142}
143
144/** \brief Translate array of buffer indices into buffer pointers
145
146 @param vm - (vlib_main_t *) vlib main data structure pointer
147 @param bi - (u32 *) array of buffer indices
148 @param b - (vlib_buffer_t **) array to store buffer pointers
149 @param count - (uword) number of elements
150*/
151
152static_always_inline void
153vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
154{
155 vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
156}
157
Ed Warnickecb9cada2015-12-08 15:45:58 -0700158/** \brief Translate buffer pointer into buffer index
159
160 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400161 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700162 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400163*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200164
Ed Warnickecb9cada2015-12-08 15:45:58 -0700165always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400166vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700167{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100168 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200169 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
170 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
171 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400172 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700173 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
174}
175
Damjan Marionafe56de2018-05-17 12:44:00 +0200176/** \brief Translate array of buffer pointers into buffer indices with offset
177
178 @param vm - (vlib_main_t *) vlib main data structure pointer
179 @param b - (void **) array of buffer pointers
180 @param bi - (u32 *) array to store buffer indices
181 @param count - (uword) number of elements
182 @param offset - (i32) offset applied to each pointer
183*/
184static_always_inline void
185vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
186 uword count, i32 offset)
187{
188#ifdef CLIB_HAVE_VEC256
189 u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
190 u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);
191
192 while (count >= 8)
193 {
194 /* load 4 pointers into 256-bit register */
195 u64x4 v0 = u64x4_load_unaligned (b);
196 u64x4 v1 = u64x4_load_unaligned (b + 4);
197 u32x8 v2, v3;
198
199 v0 -= off4;
200 v1 -= off4;
201
202 v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
203 v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;
204
205 /* permute 256-bit register so lower u32s of each buffer index are
206 * placed into lower 128-bits */
207 v2 = u32x8_permute ((u32x8) v0, mask);
208 v3 = u32x8_permute ((u32x8) v1, mask);
209
210 /* extract lower 128-bits and save them to the array of buffer indices */
211 u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
212 u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
213 bi += 8;
214 b += 8;
215 count -= 8;
216 }
217#endif
218 while (count >= 4)
219 {
220 /* equivalent non-nector implementation */
221 bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
222 bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
223 bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
224 bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
225 bi += 4;
226 b += 4;
227 count -= 4;
228 }
229 while (count)
230 {
Zhiyong Yang462072a2018-05-30 22:12:57 -0400231 bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
Damjan Marionafe56de2018-05-17 12:44:00 +0200232 bi += 1;
233 b += 1;
234 count -= 1;
235 }
236}
237
238/** \brief Translate array of buffer pointers into buffer indices
239
240 @param vm - (vlib_main_t *) vlib main data structure pointer
241 @param b - (vlib_buffer_t **) array of buffer pointers
242 @param bi - (u32 *) array to store buffer indices
243 @param count - (uword) number of elements
244*/
245static_always_inline void
246vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
247 uword count)
248{
249 vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
250}
251
Ed Warnickecb9cada2015-12-08 15:45:58 -0700252/** \brief Get next buffer in buffer linklist, or zero for end of list.
253
254 @param vm - (vlib_main_t *) vlib main data structure pointer
255 @param b - (void *) buffer pointer
256 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400257*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700258always_inline vlib_buffer_t *
259vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
260{
261 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400262 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700263}
264
Dave Barach9b8ffd92016-07-08 08:13:45 -0400265uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
266 vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700267
268/** \brief Get length in bytes of the buffer chain
269
270 @param vm - (vlib_main_t *) vlib main data structure pointer
271 @param b - (void *) buffer pointer
272 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400273*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700274always_inline uword
275vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
276{
Damjan Marion072401e2017-07-13 18:53:27 +0200277 uword len = b->current_length;
278
279 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
280 return len;
281
282 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
283 return len + b->total_length_not_including_first_buffer;
284
285 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700286}
287
288/** \brief Get length in bytes of the buffer index buffer chain
289
290 @param vm - (vlib_main_t *) vlib main data structure pointer
291 @param bi - (u32) buffer index
292 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400293*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700294always_inline uword
295vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
296{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400297 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700298 return vlib_buffer_length_in_chain (vm, b);
299}
300
301/** \brief Copy buffer contents to memory
302
303 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400304 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700305 @param contents - (u8 *) memory, <strong>must be large enough</strong>
306 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400307*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700308always_inline uword
309vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
310{
311 uword content_len = 0;
312 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400313 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700314
315 while (1)
316 {
317 b = vlib_get_buffer (vm, buffer_index);
318 l = b->current_length;
Dave Barach178cf492018-11-13 16:34:13 -0500319 clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700320 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400321 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700322 break;
323 buffer_index = b->next_buffer;
324 }
325
326 return content_len;
327}
328
Damjan Marion8f499362018-10-22 13:07:02 +0200329always_inline uword
330vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700331{
Damjan Marion68b4da62018-09-30 18:26:20 +0200332 return vlib_physmem_get_pa (vm, b->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700333}
334
Damjan Marion8f499362018-10-22 13:07:02 +0200335always_inline uword
336vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
337{
338 return vlib_buffer_get_pa (vm, b) + b->current_data;
339}
340
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
354
/** Allocation state of a buffer index, as tracked by the known-buffer hash. */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
364
Damjan Marionc8a26c62017-11-24 20:15:23 +0100365void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
366 uword n_buffers,
367 vlib_buffer_known_state_t
368 expected_state);
369
Ed Warnickecb9cada2015-12-08 15:45:58 -0700370always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800371vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700372{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100373 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700374
Damjan Marion6b0f5892017-07-27 04:01:24 -0400375 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400376 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400377 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700378 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
379}
380
381always_inline void
Steven899a84b2018-01-29 20:09:09 -0800382vlib_buffer_set_known_state (u32 buffer_index,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700383 vlib_buffer_known_state_t state)
384{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100385 vlib_buffer_main_t *bm = &buffer_main;
Steven899a84b2018-01-29 20:09:09 -0800386
Damjan Marion6b0f5892017-07-27 04:01:24 -0400387 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700388 hash_set (bm->buffer_known_hash, buffer_index, state);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400389 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700390}
391
392/* Validates sanity of a single buffer.
393 Returns format'ed vector with error message if any. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400394u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
395 uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700396
Ed Warnickecb9cada2015-12-08 15:45:58 -0700397always_inline u32
398vlib_buffer_round_size (u32 size)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400399{
400 return round_pow2 (size, sizeof (vlib_buffer_t));
401}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700402
Damjan Mariondac03522018-02-01 15:30:13 +0100403always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200404vlib_buffer_get_free_list_index (vlib_buffer_t * b)
405{
Damjan Mariondac03522018-02-01 15:30:13 +0100406 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
407 return b->free_list_index;
408
409 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200410}
411
412always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100413vlib_buffer_set_free_list_index (vlib_buffer_t * b,
414 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200415{
Damjan Mariondac03522018-02-01 15:30:13 +0100416 if (PREDICT_FALSE (index))
417 {
418 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
419 b->free_list_index = index;
420 }
421 else
422 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200423}
424
Ed Warnickecb9cada2015-12-08 15:45:58 -0700425/** \brief Allocate buffers from specific freelist into supplied array
426
427 @param vm - (vlib_main_t *) vlib main data structure pointer
428 @param buffers - (u32 * ) buffer index array
429 @param n_buffers - (u32) number of buffers requested
Dave Barach9b8ffd92016-07-08 08:13:45 -0400430 @return - (u32) number of buffers actually allocated, may be
Ed Warnickecb9cada2015-12-08 15:45:58 -0700431 less than the number requested or zero
432*/
Damjan Marion878c6092017-01-04 13:19:27 +0100433always_inline u32
434vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
435 u32 * buffers,
Damjan Mariondac03522018-02-01 15:30:13 +0100436 u32 n_buffers,
437 vlib_buffer_free_list_index_t index)
Damjan Marion878c6092017-01-04 13:19:27 +0100438{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100439 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marionc8a26c62017-11-24 20:15:23 +0100440 vlib_buffer_free_list_t *fl;
441 u32 *src;
442 uword len;
Damjan Marion878c6092017-01-04 13:19:27 +0100443
Damjan Marionc8a26c62017-11-24 20:15:23 +0100444 ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);
Damjan Marion878c6092017-01-04 13:19:27 +0100445
Damjan Mariond1274cb2018-03-13 21:32:17 +0100446 fl = pool_elt_at_index (vm->buffer_free_list_pool, index);
Damjan Marionc8a26c62017-11-24 20:15:23 +0100447
448 len = vec_len (fl->buffers);
449
450 if (PREDICT_FALSE (len < n_buffers))
451 {
452 bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
Damjan Marionc22fcba2018-03-06 18:46:54 +0100453 if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
454 return 0;
Damjan Marionc8a26c62017-11-24 20:15:23 +0100455
456 /* even if fill free list didn't manage to refill free list
457 we should give what we have */
458 n_buffers = clib_min (len, n_buffers);
459
460 /* following code is intentionaly duplicated to allow compiler
461 to optimize fast path when n_buffers is constant value */
462 src = fl->buffers + len - n_buffers;
Damjan Marion64d557c2019-01-18 20:03:41 +0100463 vlib_buffer_copy_indices (buffers, src, n_buffers);
Damjan Marionc8a26c62017-11-24 20:15:23 +0100464 _vec_len (fl->buffers) -= n_buffers;
465
466 /* Verify that buffers are known free. */
467 vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
468 VLIB_BUFFER_KNOWN_FREE);
469
470 return n_buffers;
471 }
472
473 src = fl->buffers + len - n_buffers;
Damjan Marion64d557c2019-01-18 20:03:41 +0100474 vlib_buffer_copy_indices (buffers, src, n_buffers);
Damjan Marionc8a26c62017-11-24 20:15:23 +0100475 _vec_len (fl->buffers) -= n_buffers;
476
477 /* Verify that buffers are known free. */
478 vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
479 VLIB_BUFFER_KNOWN_FREE);
480
481 return n_buffers;
482}
483
484/** \brief Allocate buffers into supplied array
485
486 @param vm - (vlib_main_t *) vlib main data structure pointer
487 @param buffers - (u32 * ) buffer index array
488 @param n_buffers - (u32) number of buffers requested
489 @return - (u32) number of buffers actually allocated, may be
490 less than the number requested or zero
491*/
492always_inline u32
493vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
494{
495 return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
496 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
Damjan Marion878c6092017-01-04 13:19:27 +0100497}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700498
Damjan Marionc58408c2018-01-18 14:54:04 +0100499/** \brief Allocate buffers into ring
500
501 @param vm - (vlib_main_t *) vlib main data structure pointer
502 @param buffers - (u32 * ) buffer index ring
503 @param start - (u32) first slot in the ring
504 @param ring_size - (u32) ring size
505 @param n_buffers - (u32) number of buffers requested
506 @return - (u32) number of buffers actually allocated, may be
507 less than the number requested or zero
508*/
509always_inline u32
510vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
511 u32 ring_size, u32 n_buffers)
512{
513 u32 n_alloc;
514
515 ASSERT (n_buffers <= ring_size);
516
517 if (PREDICT_TRUE (start + n_buffers <= ring_size))
518 return vlib_buffer_alloc (vm, ring + start, n_buffers);
519
520 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
521
522 if (PREDICT_TRUE (n_alloc == ring_size - start))
523 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
524
525 return n_alloc;
526}
527
Ed Warnickecb9cada2015-12-08 15:45:58 -0700528/** \brief Free buffers
529 Frees the entire buffer chain for each buffer
530
531 @param vm - (vlib_main_t *) vlib main data structure pointer
532 @param buffers - (u32 * ) buffer index array
533 @param n_buffers - (u32) number of buffers to free
534
535*/
Damjan Marion878c6092017-01-04 13:19:27 +0100536always_inline void
537vlib_buffer_free (vlib_main_t * vm,
538 /* pointer to first buffer */
539 u32 * buffers,
540 /* number of buffers to free */
541 u32 n_buffers)
542{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100543 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100544
545 ASSERT (bm->cb.vlib_buffer_free_cb);
546
547 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
548}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700549
550/** \brief Free buffers, does not free the buffer chain for each buffer
551
552 @param vm - (vlib_main_t *) vlib main data structure pointer
553 @param buffers - (u32 * ) buffer index array
554 @param n_buffers - (u32) number of buffers to free
555
556*/
Damjan Marion878c6092017-01-04 13:19:27 +0100557always_inline void
558vlib_buffer_free_no_next (vlib_main_t * vm,
559 /* pointer to first buffer */
560 u32 * buffers,
561 /* number of buffers to free */
562 u32 n_buffers)
563{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100564 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100565
566 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
567
568 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
569}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700570
571/** \brief Free one buffer
Dave Barach9b8ffd92016-07-08 08:13:45 -0400572 Shorthand to free a single buffer chain.
Ed Warnickecb9cada2015-12-08 15:45:58 -0700573
574 @param vm - (vlib_main_t *) vlib main data structure pointer
575 @param buffer_index - (u32) buffer index to free
576*/
577always_inline void
578vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
579{
580 vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
581}
582
Damjan Mariona3731492018-02-25 22:50:39 +0100583/** \brief Free buffers from ring
584
585 @param vm - (vlib_main_t *) vlib main data structure pointer
586 @param buffers - (u32 * ) buffer index ring
587 @param start - (u32) first slot in the ring
588 @param ring_size - (u32) ring size
589 @param n_buffers - (u32) number of buffers
590*/
591always_inline void
592vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
593 u32 ring_size, u32 n_buffers)
594{
595 ASSERT (n_buffers <= ring_size);
596
597 if (PREDICT_TRUE (start + n_buffers <= ring_size))
598 {
599 vlib_buffer_free (vm, ring + start, n_buffers);
600 }
601 else
602 {
603 vlib_buffer_free (vm, ring + start, ring_size - start);
604 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
605 }
606}
607
Damjan Marioncef1db92018-03-28 18:27:38 +0200608/** \brief Free buffers from ring without freeing tail buffers
609
610 @param vm - (vlib_main_t *) vlib main data structure pointer
611 @param buffers - (u32 * ) buffer index ring
612 @param start - (u32) first slot in the ring
613 @param ring_size - (u32) ring size
614 @param n_buffers - (u32) number of buffers
615*/
616always_inline void
617vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
618 u32 ring_size, u32 n_buffers)
619{
620 ASSERT (n_buffers <= ring_size);
621
622 if (PREDICT_TRUE (start + n_buffers <= ring_size))
623 {
Damjan Marion4a973932018-06-09 19:29:16 +0200624 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200625 }
626 else
627 {
628 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
629 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
630 }
631}
Damjan Mariona3731492018-02-25 22:50:39 +0100632
Ed Warnickecb9cada2015-12-08 15:45:58 -0700633/* Add/delete buffer free lists. */
Damjan Mariondac03522018-02-01 15:30:13 +0100634vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
635 u32 n_data_bytes,
636 char *fmt, ...);
Damjan Marion878c6092017-01-04 13:19:27 +0100637always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100638vlib_buffer_delete_free_list (vlib_main_t * vm,
639 vlib_buffer_free_list_index_t free_list_index)
Damjan Marion878c6092017-01-04 13:19:27 +0100640{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100641 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100642
643 ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);
644
645 bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
646}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700647
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100648/* Make sure we have at least given number of unaligned buffers. */
649void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
650 vlib_buffer_free_list_t *
651 free_list,
652 uword n_unaligned_buffers);
653
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100654always_inline vlib_buffer_free_list_t *
655vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100656 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100657{
Damjan Mariondac03522018-02-01 15:30:13 +0100658 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100659
Damjan Marion072401e2017-07-13 18:53:27 +0200660 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100661 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100662}
663
Ed Warnickecb9cada2015-12-08 15:45:58 -0700664always_inline vlib_buffer_free_list_t *
Damjan Mariondac03522018-02-01 15:30:13 +0100665vlib_buffer_get_free_list (vlib_main_t * vm,
666 vlib_buffer_free_list_index_t free_list_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700667{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400668 vlib_buffer_free_list_t *f;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700669
Damjan Mariond1274cb2018-03-13 21:32:17 +0100670 f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700671
672 /* Sanity: indices must match. */
673 ASSERT (f->index == free_list_index);
674
675 return f;
676}
677
678always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100679vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
680 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700681{
Damjan Mariondac03522018-02-01 15:30:13 +0100682 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700683 return f->n_data_bytes;
684}
685
Ed Warnickecb9cada2015-12-08 15:45:58 -0700686/* Append given data to end of buffer, possibly allocating new buffers. */
Damjan Marionab9b7ec2019-01-18 20:24:44 +0100687int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
688 u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700689
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100690/* duplicate all buffers in chain */
691always_inline vlib_buffer_t *
692vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
693{
694 vlib_buffer_t *s, *d, *fd;
695 uword n_alloc, n_buffers = 1;
Damjan Marion67655492016-11-15 12:50:28 +0100696 u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100697 int i;
698
699 s = b;
700 while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
701 {
702 n_buffers++;
703 s = vlib_get_buffer (vm, s->next_buffer);
704 }
Neale Ranns9d676af2017-03-15 01:28:31 -0700705 u32 new_buffers[n_buffers];
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100706
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100707 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
Dave Barach26cd8c12017-02-23 17:11:26 -0500708
709 /* No guarantee that we'll get all the buffers we asked for */
710 if (PREDICT_FALSE (n_alloc < n_buffers))
711 {
712 if (n_alloc > 0)
713 vlib_buffer_free (vm, new_buffers, n_alloc);
Dave Barach26cd8c12017-02-23 17:11:26 -0500714 return 0;
715 }
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100716
717 /* 1st segment */
718 s = b;
719 fd = d = vlib_get_buffer (vm, new_buffers[0]);
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100720 d->current_data = s->current_data;
721 d->current_length = s->current_length;
Damjan Marion67655492016-11-15 12:50:28 +0100722 d->flags = s->flags & flag_mask;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100723 d->total_length_not_including_first_buffer =
724 s->total_length_not_including_first_buffer;
Dave Barach178cf492018-11-13 16:34:13 -0500725 clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
726 clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
727 clib_memcpy_fast (vlib_buffer_get_current (d),
728 vlib_buffer_get_current (s), s->current_length);
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100729
730 /* next segments */
731 for (i = 1; i < n_buffers; i++)
732 {
733 /* previous */
734 d->next_buffer = new_buffers[i];
735 /* current */
736 s = vlib_get_buffer (vm, s->next_buffer);
737 d = vlib_get_buffer (vm, new_buffers[i]);
738 d->current_data = s->current_data;
739 d->current_length = s->current_length;
Dave Barach178cf492018-11-13 16:34:13 -0500740 clib_memcpy_fast (vlib_buffer_get_current (d),
741 vlib_buffer_get_current (s), s->current_length);
Damjan Marion67655492016-11-15 12:50:28 +0100742 d->flags = s->flags & flag_mask;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100743 }
744
745 return fd;
746}
747
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* source must not already carry extra references: n_add_refs is set
     (not incremented) below, so pre-existing references would be lost */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      /* packet too small for head/tail sharing to pay off: fall back to
         full copies; slot 0 reuses the source buffer itself */
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;		/* allocation failed: report partial count */
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  if (PREDICT_FALSE (n_buffers == 1))
    {
      /* a single "clone" is just the source buffer, unchanged */
      buffers[0] = src_buffer;
      return 1;
    }

  /* allocate one head buffer per clone; may yield fewer than requested */
  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  /* each new head gets a private copy of the first head_end_offset bytes
     and chains to the shared source buffer for the rest of the packet */
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));

      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      /* the shared tail (the source) is always chained behind the head */
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* the source now serves only as the shared tail: skip past the head
     bytes and record the extra references on every chained segment */
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
829
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800830/** \brief Create multiple clones of buffer and store them
831 in the supplied array
832
833 @param vm - (vlib_main_t *) vlib main data structure pointer
834 @param src_buffer - (u32) source buffer index
835 @param buffers - (u32 * ) buffer index array
836 @param n_buffers - (u16) number of buffer clones requested (<=256)
837 @param head_end_offset - (u16) offset relative to current position
838 where packet head ends
839 @return - (u16) number of buffers actually cloned, may be
840 less than the number requested or zero
841*/
842always_inline u16
843vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
844 u16 n_buffers, u16 head_end_offset)
845{
846 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
847 u16 n_cloned = 0;
848
849 while (n_buffers > 256)
850 {
851 vlib_buffer_t *copy;
852 copy = vlib_buffer_copy (vm, s);
853 n_cloned += vlib_buffer_clone_256 (vm,
854 vlib_get_buffer_index (vm, copy),
855 (buffers + n_cloned),
856 256, head_end_offset);
857 n_buffers -= 256;
858 }
859 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
860 buffers + n_cloned,
861 n_buffers, head_end_offset);
862
863 return n_cloned;
864}
865
Damjan Marionc47ed032017-01-25 14:18:03 +0100866/** \brief Attach cloned tail to the buffer
867
868 @param vm - (vlib_main_t *) vlib main data structure pointer
869 @param head - (vlib_buffer_t *) head buffer
870 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
871*/
872
873always_inline void
874vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
875 vlib_buffer_t * tail)
876{
877 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200878 ASSERT (vlib_buffer_get_free_list_index (head) ==
879 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100880
881 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
882 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
883 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
884 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
885 head->next_buffer = vlib_get_buffer_index (vm, tail);
886 head->total_length_not_including_first_buffer = tail->current_length +
887 tail->total_length_not_including_first_buffer;
888
889next_segment:
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000890 clib_atomic_add_fetch (&tail->n_add_refs, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +0100891
892 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
893 {
894 tail = vlib_get_buffer (vm, tail->next_buffer);
895 goto next_segment;
896 }
897}
898
Pierre Pfister328e99b2016-02-12 13:18:42 +0000899/* Initializes the buffer as an empty packet with no chained buffers. */
900always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400901vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000902{
903 first->total_length_not_including_first_buffer = 0;
904 first->current_length = 0;
905 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
906 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000907}
908
909/* The provided next_bi buffer index is appended to the end of the packet. */
910always_inline vlib_buffer_t *
Eyal Barib688fb12018-11-12 16:13:49 +0200911vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000912{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400913 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000914 last->next_buffer = next_bi;
915 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
916 next_buffer->current_length = 0;
917 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000918 return next_buffer;
919}
920
921/* Increases or decreases the packet length.
922 * It does not allocate or deallocate new buffers.
923 * Therefore, the added length must be compatible
924 * with the last buffer. */
925always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400926vlib_buffer_chain_increase_length (vlib_buffer_t * first,
927 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000928{
929 last->current_length += len;
930 if (first != last)
931 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000932}
933
934/* Copy data to the end of the packet and increases its length.
935 * It does not allocate new buffers.
936 * Returns the number of copied bytes. */
937always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400938vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100939 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400940 vlib_buffer_t * first,
941 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000942{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400943 u32 n_buffer_bytes =
944 vlib_buffer_free_list_buffer_size (vm, free_list_index);
945 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
946 u16 len = clib_min (data_len,
947 n_buffer_bytes - last->current_length -
948 last->current_data);
Dave Barach178cf492018-11-13 16:34:13 -0500949 clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
950 data, len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400951 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000952 return len;
953}
954
955/* Copy data to the end of the packet and increases its length.
956 * Allocates additional buffers from the free list if necessary.
957 * Returns the number of copied bytes.
958 * 'last' value is modified whenever new buffers are allocated and
959 * chained and points to the last buffer in the chain. */
960u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400961vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100962 vlib_buffer_free_list_index_t
963 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400964 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +0100965 vlib_buffer_t ** last, void *data,
966 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400967void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000968
Dave Barach9b8ffd92016-07-08 08:13:45 -0400969format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
970 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700971
/* Pre-built packet template: a prototype payload plus the bookkeeping
   needed to stamp out copies of it cheaply. */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Cache of buffer indices for this template; presumably pre-filled
     buffers ready to hand out - confirm against
     vlib_packet_template_get_packet_helper. */
  u32 *free_buffers;

  /* Template name (a vec, built from the fmt argument of
     vlib_packet_template_init). */
  u8 *name;
} vlib_packet_template_t;
987
988void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
989 vlib_packet_template_t * t);
990
991void vlib_packet_template_init (vlib_main_t * vm,
992 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400993 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700994 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +0100995 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400996 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700997
Dave Barach9b8ffd92016-07-08 08:13:45 -0400998void *vlib_packet_template_get_packet (vlib_main_t * vm,
999 vlib_packet_template_t * t,
1000 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001001
/* Release the packet-data vector owned by template t.
   The template structure itself is not freed. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1007
/* Set a buffer quickly into "uninitialized" state.  We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer.
   The buffer is stamped from the free list's init template; fields
   outside the template (n_add_refs, free list index) are set
   explicitly. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  vlib_buffer_copy_template (dst, src);

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not zeroed for some buffers. One option is to
   * uncomment the line lower (comes at a cost), the other, is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}
1045
/* Return one buffer to free list f, optionally re-initializing it from
   the list's template.  When the local free list grows past
   4 * VLIB_FRAME_SIZE entries, the oldest VLIB_FRAME_SIZE entries are
   flushed to the global buffer pool under the pool spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1069
Ed Warnickecb9cada2015-12-08 15:45:58 -07001070#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001071extern u32 *vlib_buffer_state_validation_lock;
1072extern uword *vlib_buffer_state_validation_hash;
1073extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001074#endif
1075
Dave Barach9b8ffd92016-07-08 08:13:45 -04001076static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001077vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
1078{
1079#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001080 uword *p;
1081 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001082
1083 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1084
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001085 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001086 ;
1087
1088 p = hash_get (vlib_buffer_state_validation_hash, b);
1089
1090 /* If we don't know about b, declare it to be in the expected state */
1091 if (!p)
1092 {
1093 hash_set (vlib_buffer_state_validation_hash, b, expected);
1094 goto out;
1095 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001096
Ed Warnickecb9cada2015-12-08 15:45:58 -07001097 if (p[0] != expected)
1098 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001099 void cj_stop (void);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001100 u32 bi;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001101 vlib_main_t *vm = &vlib_global_main;
1102
1103 cj_stop ();
1104
Ed Warnickecb9cada2015-12-08 15:45:58 -07001105 bi = vlib_get_buffer_index (vm, b);
1106
1107 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001108 clib_warning ("%.6f buffer %llx (%d): %s, not %s",
1109 vlib_time_now (vm), bi,
1110 p[0] ? "busy" : "free", expected ? "busy" : "free");
1111 os_panic ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001112 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001113out:
1114 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001115 *vlib_buffer_state_validation_lock = 0;
1116 clib_mem_set_heap (oldheap);
1117#endif
1118}
1119
Dave Barach9b8ffd92016-07-08 08:13:45 -04001120static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001121vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
1122{
1123#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001124 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001125
1126 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1127
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001128 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001129 ;
1130
1131 hash_set (vlib_buffer_state_validation_hash, b, expected);
1132
Dave Barach9b8ffd92016-07-08 08:13:45 -04001133 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001134 *vlib_buffer_state_validation_lock = 0;
1135 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001136#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07001137}
1138
Klement Sekera75e7d132017-09-20 08:26:30 +02001139/** minimum data size of first buffer in a buffer chain */
1140#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1141
1142/**
1143 * @brief compress buffer chain in a way where the first buffer is at least
1144 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
1145 *
1146 * @param[in] vm - vlib_main
1147 * @param[in,out] first - first buffer in chain
1148 * @param[in,out] discard_vector - vector of buffer indexes which were removed
1149 * from the chain
1150 */
1151always_inline void
1152vlib_buffer_chain_compress (vlib_main_t * vm,
1153 vlib_buffer_t * first, u32 ** discard_vector)
1154{
1155 if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
1156 !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
1157 {
1158 /* this is already big enough or not a chain */
1159 return;
1160 }
1161 /* probe free list to find allocated buffer size to avoid overfill */
Damjan Mariondac03522018-02-01 15:30:13 +01001162 vlib_buffer_free_list_index_t index;
Klement Sekera75e7d132017-09-20 08:26:30 +02001163 vlib_buffer_free_list_t *free_list =
1164 vlib_buffer_get_buffer_free_list (vm, first, &index);
1165
1166 u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
1167 free_list->n_data_bytes -
1168 first->current_data);
1169 do
1170 {
1171 vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
1172 u32 need = want_first_size - first->current_length;
1173 u32 amount_to_copy = clib_min (need, second->current_length);
Dave Barach178cf492018-11-13 16:34:13 -05001174 clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
1175 first->current_length,
1176 vlib_buffer_get_current (second), amount_to_copy);
Klement Sekera75e7d132017-09-20 08:26:30 +02001177 first->current_length += amount_to_copy;
Klement Sekera69db1a62018-12-11 16:55:33 +01001178 second->current_data += amount_to_copy;
1179 second->current_length -= amount_to_copy;
Klement Sekera75e7d132017-09-20 08:26:30 +02001180 if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
1181 {
1182 first->total_length_not_including_first_buffer -= amount_to_copy;
1183 }
1184 if (!second->current_length)
1185 {
1186 vec_add1 (*discard_vector, first->next_buffer);
1187 if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
1188 {
1189 first->next_buffer = second->next_buffer;
1190 }
1191 else
1192 {
1193 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1194 }
1195 second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1196 }
1197 }
1198 while ((first->current_length < want_first_size) &&
1199 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
1200}
1201
Eyal Barid3d42412018-11-05 13:29:25 +02001202/**
1203 * @brief linearize buffer chain - the first buffer is filled, if needed,
1204 * buffers are allocated and filled, returns free space in last buffer or
1205 * negative on failure
1206 *
1207 * @param[in] vm - vlib_main
1208 * @param[in,out] first - first buffer in chain
1209 */
1210always_inline int
1211vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
1212{
1213 vlib_buffer_t *b = first;
1214 vlib_buffer_free_list_t *fl =
1215 vlib_buffer_get_free_list (vm, vlib_buffer_get_free_list_index (b));
1216 u32 buf_len = fl->n_data_bytes;
1217 // free buffer chain starting from the second buffer
1218 int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
1219 u32 chain_to_free = b->next_buffer;
1220
1221 u32 len = vlib_buffer_length_in_chain (vm, b);
1222 u32 free_len = buf_len - b->current_data - b->current_length;
1223 int alloc_len = clib_max (len - free_len, 0); //use the free len in the first buffer
1224 int n_buffers = (alloc_len + buf_len - 1) / buf_len;
1225 u32 new_buffers[n_buffers];
1226
1227 u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
1228 if (n_alloc != n_buffers)
1229 {
1230 vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
1231 return -1;
1232 }
1233
1234 vlib_buffer_t *s = b;
1235 while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
1236 {
1237 s = vlib_get_buffer (vm, s->next_buffer);
1238 int d_free_len = buf_len - b->current_data - b->current_length;
1239 ASSERT (d_free_len >= 0);
1240 // chain buf and split write
1241 u32 copy_len = clib_min (d_free_len, s->current_length);
1242 u8 *d = vlib_buffer_put_uninit (b, copy_len);
1243 clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
1244 int rest = s->current_length - copy_len;
1245 if (rest > 0)
1246 {
1247 //prev buf is full
1248 ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
1249 ASSERT (n_buffers > 0);
1250 b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
1251 //make full use of the new buffers
1252 b->current_data = 0;
1253 d = vlib_buffer_put_uninit (b, rest);
1254 clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
1255 }
1256 }
1257 vlib_buffer_free (vm, &chain_to_free, free_count);
1258 b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
1259 if (b == first) /* no buffers addeed */
1260 b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1261 ASSERT (len == vlib_buffer_length_in_chain (vm, first));
1262 ASSERT (n_buffers == 0);
1263 return buf_len - b->current_data - b->current_length;
1264}
1265
Ed Warnickecb9cada2015-12-08 15:45:58 -07001266#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001267
1268/*
1269 * fd.io coding-style-patch-verification: ON
1270 *
1271 * Local Variables:
1272 * eval: (c-set-style "gnu")
1273 * End:
1274 */