blob: 1110c206e525397414e287c51ac5b67ce250f276 [file] [log] [blame]
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>

/** \file
    vlib buffer access methods.
*/

50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond1274cb2018-03-13 21:32:17 +010060 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* bake the offset into the base address so a single shift+add per
     index yields the final pointer */
  u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      /* widen 4 x u32 indices to 4 x u64 lanes */
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      /* low two indices -> first pair of pointers */
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      /* aarch64 has a direct high-half widen */
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      /* elsewhere: rotate high lanes into the low half first */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      /* scalar fallback, unrolled by four */
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* remainder, one at a time */
  while (count)
    {
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b += 1;
      bi += 1;
      count -= 1;
    }
}
131
132/** \brief Translate array of buffer indices into buffer pointers
133
134 @param vm - (vlib_main_t *) vlib main data structure pointer
135 @param bi - (u32 *) array of buffer indices
136 @param b - (vlib_buffer_t **) array to store buffer pointers
137 @param count - (uword) number of elements
138*/
139
140static_always_inline void
141vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
142{
143 vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
144}
145
Ed Warnickecb9cada2015-12-08 15:45:58 -0700146/** \brief Translate buffer pointer into buffer index
147
148 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400149 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400151*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200152
Ed Warnickecb9cada2015-12-08 15:45:58 -0700153always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400154vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700155{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100156 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200157 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
158 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
159 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400160 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
162}
163
/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* permutation that gathers the low u32 of each u64 lane into the
     low 128 bits */
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* subtracting (mem_start - offset) undoes both base and offset at once */
  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
225
226/** \brief Translate array of buffer pointers into buffer indices
227
228 @param vm - (vlib_main_t *) vlib main data structure pointer
229 @param b - (vlib_buffer_t **) array of buffer pointers
230 @param bi - (u32 *) array to store buffer indices
231 @param count - (uword) number of elements
232*/
233static_always_inline void
234vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
235 uword count)
236{
237 vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
238}
239
Ed Warnickecb9cada2015-12-08 15:45:58 -0700240/** \brief Get next buffer in buffer linklist, or zero for end of list.
241
242 @param vm - (vlib_main_t *) vlib main data structure pointer
243 @param b - (void *) buffer pointer
244 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246always_inline vlib_buffer_t *
247vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
248{
249 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400250 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251}
252
Dave Barach9b8ffd92016-07-08 08:13:45 -0400253uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
254 vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700255
256/** \brief Get length in bytes of the buffer chain
257
258 @param vm - (vlib_main_t *) vlib main data structure pointer
259 @param b - (void *) buffer pointer
260 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400261*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262always_inline uword
263vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
264{
Damjan Marion072401e2017-07-13 18:53:27 +0200265 uword len = b->current_length;
266
267 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
268 return len;
269
270 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
271 return len + b->total_length_not_including_first_buffer;
272
273 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700274}
275
276/** \brief Get length in bytes of the buffer index buffer chain
277
278 @param vm - (vlib_main_t *) vlib main data structure pointer
279 @param bi - (u32) buffer index
280 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400281*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700282always_inline uword
283vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
284{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400285 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700286 return vlib_buffer_length_in_chain (vm, b);
287}
288
289/** \brief Copy buffer contents to memory
290
291 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400292 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700293 @param contents - (u8 *) memory, <strong>must be large enough</strong>
294 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400295*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700296always_inline uword
297vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
298{
299 uword content_len = 0;
300 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700302
303 while (1)
304 {
305 b = vlib_get_buffer (vm, buffer_index);
306 l = b->current_length;
Damjan Marionf1213b82016-03-13 02:22:06 +0100307 clib_memcpy (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700308 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400309 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700310 break;
311 buffer_index = b->next_buffer;
312 }
313
314 return content_len;
315}
316
/* Physical address of the start of the buffer's data area (b->data),
   resolved through the buffer's owning pool's physmem region. */
always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
					       b->buffer_pool_index);

  return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
}
325
Damjan Marion8f499362018-10-22 13:07:02 +0200326always_inline uword
327vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
328{
329 return vlib_buffer_get_pa (vm, b) + b->current_data;
330}
331
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type) \
  do { \
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi); \
    vlib_prefetch_buffer_header (_b, type); \
  } while (0)
345
#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
/* NOTE: disabled (#if 0) — kept for reference only; walks the
   buffer_known_hash without taking its spinlock. */
#define vlib_buffer_foreach_allocated(vm,bi,body) \
do { \
  vlib_main_t * _vmain = (vm); \
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main; \
  hash_pair_t * _vbpair; \
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({ \
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) { \
      (bi) = _vbpair->key; \
      body; \
    } \
  })); \
} while (0)
#endif
367
/* Allocation state tracked per buffer index in buffer_known_hash. */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
377
Damjan Marionc8a26c62017-11-24 20:15:23 +0100378void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
379 uword n_buffers,
380 vlib_buffer_known_state_t
381 expected_state);
382
Ed Warnickecb9cada2015-12-08 15:45:58 -0700383always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800384vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700385{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100386 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700387
Damjan Marion6b0f5892017-07-27 04:01:24 -0400388 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400389 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400390 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700391 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
392}
393
394always_inline void
Steven899a84b2018-01-29 20:09:09 -0800395vlib_buffer_set_known_state (u32 buffer_index,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700396 vlib_buffer_known_state_t state)
397{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100398 vlib_buffer_main_t *bm = &buffer_main;
Steven899a84b2018-01-29 20:09:09 -0800399
Damjan Marion6b0f5892017-07-27 04:01:24 -0400400 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700401 hash_set (bm->buffer_known_hash, buffer_index, state);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400402 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700403}
404
405/* Validates sanity of a single buffer.
406 Returns format'ed vector with error message if any. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400407u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
408 uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700409
Ed Warnickecb9cada2015-12-08 15:45:58 -0700410always_inline u32
411vlib_buffer_round_size (u32 size)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400412{
413 return round_pow2 (size, sizeof (vlib_buffer_t));
414}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700415
Damjan Mariondac03522018-02-01 15:30:13 +0100416always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200417vlib_buffer_get_free_list_index (vlib_buffer_t * b)
418{
Damjan Mariondac03522018-02-01 15:30:13 +0100419 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
420 return b->free_list_index;
421
422 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200423}
424
425always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100426vlib_buffer_set_free_list_index (vlib_buffer_t * b,
427 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200428{
Damjan Mariondac03522018-02-01 15:30:13 +0100429 if (PREDICT_FALSE (index))
430 {
431 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
432 b->free_list_index = index;
433 }
434 else
435 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200436}
437
/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param index - (vlib_buffer_free_list_index_t) free list to draw from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      /* free list is short: ask the registered callback to refill it */
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* fast path: take the tail of the free-list vector */
  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
496
497/** \brief Allocate buffers into supplied array
498
499 @param vm - (vlib_main_t *) vlib main data structure pointer
500 @param buffers - (u32 * ) buffer index array
501 @param n_buffers - (u32) number of buffers requested
502 @return - (u32) number of buffers actually allocated, may be
503 less than the number requested or zero
504*/
505always_inline u32
506vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
507{
508 return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
509 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
Damjan Marion878c6092017-01-04 13:19:27 +0100510}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700511
Damjan Marionc58408c2018-01-18 14:54:04 +0100512/** \brief Allocate buffers into ring
513
514 @param vm - (vlib_main_t *) vlib main data structure pointer
515 @param buffers - (u32 * ) buffer index ring
516 @param start - (u32) first slot in the ring
517 @param ring_size - (u32) ring size
518 @param n_buffers - (u32) number of buffers requested
519 @return - (u32) number of buffers actually allocated, may be
520 less than the number requested or zero
521*/
522always_inline u32
523vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
524 u32 ring_size, u32 n_buffers)
525{
526 u32 n_alloc;
527
528 ASSERT (n_buffers <= ring_size);
529
530 if (PREDICT_TRUE (start + n_buffers <= ring_size))
531 return vlib_buffer_alloc (vm, ring + start, n_buffers);
532
533 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
534
535 if (PREDICT_TRUE (n_alloc == ring_size - start))
536 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
537
538 return n_alloc;
539}
540
Ed Warnickecb9cada2015-12-08 15:45:58 -0700541/** \brief Free buffers
542 Frees the entire buffer chain for each buffer
543
544 @param vm - (vlib_main_t *) vlib main data structure pointer
545 @param buffers - (u32 * ) buffer index array
546 @param n_buffers - (u32) number of buffers to free
547
548*/
Damjan Marion878c6092017-01-04 13:19:27 +0100549always_inline void
550vlib_buffer_free (vlib_main_t * vm,
551 /* pointer to first buffer */
552 u32 * buffers,
553 /* number of buffers to free */
554 u32 n_buffers)
555{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100556 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100557
558 ASSERT (bm->cb.vlib_buffer_free_cb);
559
560 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
561}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700562
563/** \brief Free buffers, does not free the buffer chain for each buffer
564
565 @param vm - (vlib_main_t *) vlib main data structure pointer
566 @param buffers - (u32 * ) buffer index array
567 @param n_buffers - (u32) number of buffers to free
568
569*/
Damjan Marion878c6092017-01-04 13:19:27 +0100570always_inline void
571vlib_buffer_free_no_next (vlib_main_t * vm,
572 /* pointer to first buffer */
573 u32 * buffers,
574 /* number of buffers to free */
575 u32 n_buffers)
576{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100577 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100578
579 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
580
581 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
582}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700583
584/** \brief Free one buffer
Dave Barach9b8ffd92016-07-08 08:13:45 -0400585 Shorthand to free a single buffer chain.
Ed Warnickecb9cada2015-12-08 15:45:58 -0700586
587 @param vm - (vlib_main_t *) vlib main data structure pointer
588 @param buffer_index - (u32) buffer index to free
589*/
590always_inline void
591vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
592{
593 vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
594}
595
Damjan Mariona3731492018-02-25 22:50:39 +0100596/** \brief Free buffers from ring
597
598 @param vm - (vlib_main_t *) vlib main data structure pointer
599 @param buffers - (u32 * ) buffer index ring
600 @param start - (u32) first slot in the ring
601 @param ring_size - (u32) ring size
602 @param n_buffers - (u32) number of buffers
603*/
604always_inline void
605vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
606 u32 ring_size, u32 n_buffers)
607{
608 ASSERT (n_buffers <= ring_size);
609
610 if (PREDICT_TRUE (start + n_buffers <= ring_size))
611 {
612 vlib_buffer_free (vm, ring + start, n_buffers);
613 }
614 else
615 {
616 vlib_buffer_free (vm, ring + start, ring_size - start);
617 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
618 }
619}
620
Damjan Marioncef1db92018-03-28 18:27:38 +0200621/** \brief Free buffers from ring without freeing tail buffers
622
623 @param vm - (vlib_main_t *) vlib main data structure pointer
624 @param buffers - (u32 * ) buffer index ring
625 @param start - (u32) first slot in the ring
626 @param ring_size - (u32) ring size
627 @param n_buffers - (u32) number of buffers
628*/
629always_inline void
630vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
631 u32 ring_size, u32 n_buffers)
632{
633 ASSERT (n_buffers <= ring_size);
634
635 if (PREDICT_TRUE (start + n_buffers <= ring_size))
636 {
Damjan Marion4a973932018-06-09 19:29:16 +0200637 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200638 }
639 else
640 {
641 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
642 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
643 }
644}
Damjan Mariona3731492018-02-25 22:50:39 +0100645
Ed Warnickecb9cada2015-12-08 15:45:58 -0700646/* Add/delete buffer free lists. */
Damjan Mariondac03522018-02-01 15:30:13 +0100647vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
648 u32 n_data_bytes,
649 char *fmt, ...);
Damjan Marion878c6092017-01-04 13:19:27 +0100650always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100651vlib_buffer_delete_free_list (vlib_main_t * vm,
652 vlib_buffer_free_list_index_t free_list_index)
Damjan Marion878c6092017-01-04 13:19:27 +0100653{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100654 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100655
656 ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);
657
658 bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
659}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700660
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100661/* Make sure we have at least given number of unaligned buffers. */
662void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
663 vlib_buffer_free_list_t *
664 free_list,
665 uword n_unaligned_buffers);
666
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100667always_inline vlib_buffer_free_list_t *
668vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100669 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100670{
Damjan Mariondac03522018-02-01 15:30:13 +0100671 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100672
Damjan Marion072401e2017-07-13 18:53:27 +0200673 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100674 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100675}
676
Ed Warnickecb9cada2015-12-08 15:45:58 -0700677always_inline vlib_buffer_free_list_t *
Damjan Mariondac03522018-02-01 15:30:13 +0100678vlib_buffer_get_free_list (vlib_main_t * vm,
679 vlib_buffer_free_list_index_t free_list_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700680{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400681 vlib_buffer_free_list_t *f;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700682
Damjan Mariond1274cb2018-03-13 21:32:17 +0100683 f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700684
685 /* Sanity: indices must match. */
686 ASSERT (f->index == free_list_index);
687
688 return f;
689}
690
691always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100692vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
693 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700694{
Damjan Mariondac03522018-02-01 15:30:13 +0100695 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700696 return f->n_data_bytes;
697}
698
Dave Barach9b8ffd92016-07-08 08:13:45 -0400699void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700700
701/* Reasonably fast buffer copy routine. */
702always_inline void
703vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
704{
705 while (n >= 4)
706 {
707 dst[0] = src[0];
708 dst[1] = src[1];
709 dst[2] = src[2];
710 dst[3] = src[3];
711 dst += 4;
712 src += 4;
713 n -= 4;
714 }
715 while (n > 0)
716 {
717 dst[0] = src[0];
718 dst += 1;
719 src += 1;
720 n -= 1;
721 }
722}
723
Ed Warnickecb9cada2015-12-08 15:45:58 -0700724/* Append given data to end of buffer, possibly allocating new buffers. */
725u32 vlib_buffer_add_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100726 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400727 u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700728
/* Duplicate all buffers in a chain.  Returns the first buffer of the
   new chain, or NULL if allocation failed (nothing is leaked in that
   case).  Note: opaque/opaque2 metadata is copied for the first
   segment only; subsequent segments get data, lengths and masked
   flags. */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  /* only chain-related flags propagate to the copy */
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* count segments in the source chain */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  /* VLA sized by the (typically small) segment count */
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
786
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    Clones share the source buffer's payload past head_end_offset: each
    clone gets a private head of head_end_offset bytes and then chains to
    the (reference-counted) source.  If the head region is too small to be
    worth sharing, full copies are made instead.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* the source must not already be shared */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  /* head region too small for sharing to pay off: fall back to full
     copies; the source itself is handed back as the first "clone" */
  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;		/* partial result on allocation failure */
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  /* single clone requested: no sharing needed, return the source */
  if (PREDICT_FALSE (n_buffers == 1))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  /* allocate one private head buffer per clone; the allocator may
     return fewer than requested */
  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;	/* private head bytes only */
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));

      /* everything past the head (rest of s plus its chain) hangs off
         each clone's next_buffer */
      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* source now begins where the private heads end; every segment of the
     shared tail carries the extra reference count */
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
868
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800869/** \brief Create multiple clones of buffer and store them
870 in the supplied array
871
872 @param vm - (vlib_main_t *) vlib main data structure pointer
873 @param src_buffer - (u32) source buffer index
874 @param buffers - (u32 * ) buffer index array
875 @param n_buffers - (u16) number of buffer clones requested (<=256)
876 @param head_end_offset - (u16) offset relative to current position
877 where packet head ends
878 @return - (u16) number of buffers actually cloned, may be
879 less than the number requested or zero
880*/
881always_inline u16
882vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
883 u16 n_buffers, u16 head_end_offset)
884{
885 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
886 u16 n_cloned = 0;
887
888 while (n_buffers > 256)
889 {
890 vlib_buffer_t *copy;
891 copy = vlib_buffer_copy (vm, s);
892 n_cloned += vlib_buffer_clone_256 (vm,
893 vlib_get_buffer_index (vm, copy),
894 (buffers + n_cloned),
895 256, head_end_offset);
896 n_buffers -= 256;
897 }
898 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
899 buffers + n_cloned,
900 n_buffers, head_end_offset);
901
902 return n_cloned;
903}
904
Damjan Marionc47ed032017-01-25 14:18:03 +0100905/** \brief Attach cloned tail to the buffer
906
907 @param vm - (vlib_main_t *) vlib main data structure pointer
908 @param head - (vlib_buffer_t *) head buffer
909 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
910*/
911
912always_inline void
913vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
914 vlib_buffer_t * tail)
915{
916 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200917 ASSERT (vlib_buffer_get_free_list_index (head) ==
918 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100919
920 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
921 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
922 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
923 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
924 head->next_buffer = vlib_get_buffer_index (vm, tail);
925 head->total_length_not_including_first_buffer = tail->current_length +
926 tail->total_length_not_including_first_buffer;
927
928next_segment:
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000929 clib_atomic_add_fetch (&tail->n_add_refs, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +0100930
931 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
932 {
933 tail = vlib_get_buffer (vm, tail->next_buffer);
934 goto next_segment;
935 }
936}
937
Pierre Pfister328e99b2016-02-12 13:18:42 +0000938/* Initializes the buffer as an empty packet with no chained buffers. */
939always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400940vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000941{
942 first->total_length_not_including_first_buffer = 0;
943 first->current_length = 0;
944 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
945 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000946}
947
948/* The provided next_bi buffer index is appended to the end of the packet. */
949always_inline vlib_buffer_t *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400950vlib_buffer_chain_buffer (vlib_main_t * vm,
951 vlib_buffer_t * first,
952 vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000953{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400954 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000955 last->next_buffer = next_bi;
956 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
957 next_buffer->current_length = 0;
958 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000959 return next_buffer;
960}
961
962/* Increases or decreases the packet length.
963 * It does not allocate or deallocate new buffers.
964 * Therefore, the added length must be compatible
965 * with the last buffer. */
966always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400967vlib_buffer_chain_increase_length (vlib_buffer_t * first,
968 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000969{
970 last->current_length += len;
971 if (first != last)
972 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000973}
974
975/* Copy data to the end of the packet and increases its length.
976 * It does not allocate new buffers.
977 * Returns the number of copied bytes. */
978always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400979vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100980 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400981 vlib_buffer_t * first,
982 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000983{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400984 u32 n_buffer_bytes =
985 vlib_buffer_free_list_buffer_size (vm, free_list_index);
986 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
987 u16 len = clib_min (data_len,
988 n_buffer_bytes - last->current_length -
989 last->current_data);
990 clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
991 len);
992 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000993 return len;
994}
995
996/* Copy data to the end of the packet and increases its length.
997 * Allocates additional buffers from the free list if necessary.
998 * Returns the number of copied bytes.
999 * 'last' value is modified whenever new buffers are allocated and
1000 * chained and points to the last buffer in the chain. */
1001u16
Dave Barach9b8ffd92016-07-08 08:13:45 -04001002vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +01001003 vlib_buffer_free_list_index_t
1004 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001005 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +01001006 vlib_buffer_t ** last, void *data,
1007 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001008void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001009
Dave Barach9b8ffd92016-07-08 08:13:45 -04001010format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
1011 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001012
Dave Barach9b8ffd92016-07-08 08:13:45 -04001013typedef struct
1014{
Ed Warnickecb9cada2015-12-08 15:45:58 -07001015 /* Vector of packet data. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001016 u8 *packet_data;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001017
Damjan Mariond1274cb2018-03-13 21:32:17 +01001018 /* Number of buffers to allocate in each call to allocator. */
1019 u32 min_n_buffers_each_alloc;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001020
1021 /* Buffer free list for this template. */
Damjan Mariondac03522018-02-01 15:30:13 +01001022 vlib_buffer_free_list_index_t free_list_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001023
Dave Barach9b8ffd92016-07-08 08:13:45 -04001024 u32 *free_buffers;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001025} vlib_packet_template_t;
1026
1027void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
1028 vlib_packet_template_t * t);
1029
1030void vlib_packet_template_init (vlib_main_t * vm,
1031 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001032 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001033 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001034 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001035 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001036
Dave Barach9b8ffd92016-07-08 08:13:45 -04001037void *vlib_packet_template_get_packet (vlib_main_t * vm,
1038 vlib_packet_template_t * t,
1039 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001040
/* Release the template's packet-data vector.
   NOTE(review): t->free_buffers is not freed here — confirm those
   buffers are returned elsewhere before dropping the template. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1046
/* Count the bytes available to unserialize: the unread remainder of the
   current stream buffer, the rest of the last buffer's chain, and every
   buffer still queued in the rx fifo. */
always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  /* the buffer-serialization state was stashed in the stream's opaque slot */
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  /* unread bytes left in the current buffer */
  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)	/* ~0 means no buffer in progress */
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}
1076
Ed Warnickecb9cada2015-12-08 15:45:58 -07001077/* Set a buffer quickly into "uninitialized" state. We want this to
1078 be extremely cheap and arrange for all fields that need to be
1079 initialized to be in the first 128 bits of the buffer. */
1080always_inline void
Damjan Marionbd69a5f2017-02-05 23:44:42 +01001081vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001082 vlib_buffer_free_list_t * fl)
1083{
Damjan Marionbd69a5f2017-02-05 23:44:42 +01001084 vlib_buffer_t *src = &fl->buffer_init_template;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001085
Damjan Marion19010202016-03-24 17:17:47 +01001086 /* Make sure vlib_buffer_t is cacheline aligned and sized */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001087 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
1088 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
1089 CLIB_CACHE_LINE_BYTES);
1090 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
1091 CLIB_CACHE_LINE_BYTES * 2);
Damjan Marion19010202016-03-24 17:17:47 +01001092
Ed Warnickecb9cada2015-12-08 15:45:58 -07001093 /* Make sure buffer template is sane. */
Damjan Marion072401e2017-07-13 18:53:27 +02001094 ASSERT (fl->index == vlib_buffer_get_free_list_index (src));
Ed Warnickecb9cada2015-12-08 15:45:58 -07001095
Dave Barachf8690282017-03-01 11:38:02 -05001096 clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
1097 STRUCT_MARK_PTR (src, template_start),
1098 STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
1099 STRUCT_OFFSET_OF (vlib_buffer_t, template_start));
1100
1101 /* Not in the first 16 octets. */
1102 dst->n_add_refs = src->n_add_refs;
Damjan Mariondac03522018-02-01 15:30:13 +01001103 vlib_buffer_set_free_list_index (dst, fl->index);
Dave Barachf8690282017-03-01 11:38:02 -05001104
Ed Warnickecb9cada2015-12-08 15:45:58 -07001105 /* Make sure it really worked. */
Dave Barachf8690282017-03-01 11:38:02 -05001106#define _(f) ASSERT (dst->f == src->f);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001107 _(current_data);
1108 _(current_length);
1109 _(flags);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001110#undef _
Florin Corasb2215d62017-08-01 16:56:58 -07001111 /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
1112 /* total_length_not_including_first_buffer is not in the template anymore
1113 * so it may actually not zeroed for some buffers. One option is to
1114 * uncomment the line lower (comes at a cost), the other, is to just not
1115 * care */
1116 /* dst->total_length_not_including_first_buffer = 0; */
Damjan Marionc47ed032017-01-25 14:18:03 +01001117 ASSERT (dst->n_add_refs == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001118}
1119
/* Return one buffer to a free list, optionally re-initializing it from
   the list's template.  When the per-list cache grows past 4 frames,
   spill one frame's worth of the oldest entries back to the shared
   buffer pool (under the pool spinlock). */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1143
Ed Warnickecb9cada2015-12-08 15:45:58 -07001144#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001145extern u32 *vlib_buffer_state_validation_lock;
1146extern uword *vlib_buffer_state_validation_hash;
1147extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001148#endif
1149
Dave Barach9b8ffd92016-07-08 08:13:45 -04001150static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001151vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
1152{
1153#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001154 uword *p;
1155 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001156
1157 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1158
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001159 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001160 ;
1161
1162 p = hash_get (vlib_buffer_state_validation_hash, b);
1163
1164 /* If we don't know about b, declare it to be in the expected state */
1165 if (!p)
1166 {
1167 hash_set (vlib_buffer_state_validation_hash, b, expected);
1168 goto out;
1169 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001170
Ed Warnickecb9cada2015-12-08 15:45:58 -07001171 if (p[0] != expected)
1172 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001173 void cj_stop (void);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001174 u32 bi;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001175 vlib_main_t *vm = &vlib_global_main;
1176
1177 cj_stop ();
1178
Ed Warnickecb9cada2015-12-08 15:45:58 -07001179 bi = vlib_get_buffer_index (vm, b);
1180
1181 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001182 clib_warning ("%.6f buffer %llx (%d): %s, not %s",
1183 vlib_time_now (vm), bi,
1184 p[0] ? "busy" : "free", expected ? "busy" : "free");
1185 os_panic ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001186 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001187out:
1188 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001189 *vlib_buffer_state_validation_lock = 0;
1190 clib_mem_set_heap (oldheap);
1191#endif
1192}
1193
Dave Barach9b8ffd92016-07-08 08:13:45 -04001194static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001195vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
1196{
1197#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001198 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001199
1200 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1201
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001202 while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001203 ;
1204
1205 hash_set (vlib_buffer_state_validation_hash, b, expected);
1206
Dave Barach9b8ffd92016-07-08 08:13:45 -04001207 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001208 *vlib_buffer_state_validation_lock = 0;
1209 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001210#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07001211}
1212
Klement Sekera75e7d132017-09-20 08:26:30 +02001213/** minimum data size of first buffer in a buffer chain */
1214#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1215
1216/**
1217 * @brief compress buffer chain in a way where the first buffer is at least
1218 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
1219 *
1220 * @param[in] vm - vlib_main
1221 * @param[in,out] first - first buffer in chain
1222 * @param[in,out] discard_vector - vector of buffer indexes which were removed
1223 * from the chain
1224 */
1225always_inline void
1226vlib_buffer_chain_compress (vlib_main_t * vm,
1227 vlib_buffer_t * first, u32 ** discard_vector)
1228{
1229 if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
1230 !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
1231 {
1232 /* this is already big enough or not a chain */
1233 return;
1234 }
1235 /* probe free list to find allocated buffer size to avoid overfill */
Damjan Mariondac03522018-02-01 15:30:13 +01001236 vlib_buffer_free_list_index_t index;
Klement Sekera75e7d132017-09-20 08:26:30 +02001237 vlib_buffer_free_list_t *free_list =
1238 vlib_buffer_get_buffer_free_list (vm, first, &index);
1239
1240 u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
1241 free_list->n_data_bytes -
1242 first->current_data);
1243 do
1244 {
1245 vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
1246 u32 need = want_first_size - first->current_length;
1247 u32 amount_to_copy = clib_min (need, second->current_length);
1248 clib_memcpy (((u8 *) vlib_buffer_get_current (first)) +
1249 first->current_length,
1250 vlib_buffer_get_current (second), amount_to_copy);
1251 first->current_length += amount_to_copy;
1252 vlib_buffer_advance (second, amount_to_copy);
1253 if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
1254 {
1255 first->total_length_not_including_first_buffer -= amount_to_copy;
1256 }
1257 if (!second->current_length)
1258 {
1259 vec_add1 (*discard_vector, first->next_buffer);
1260 if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
1261 {
1262 first->next_buffer = second->next_buffer;
1263 }
1264 else
1265 {
1266 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1267 }
1268 second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1269 }
1270 }
1271 while ((first->current_length < want_first_size) &&
1272 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
1273}
1274
Ed Warnickecb9cada2015-12-08 15:45:58 -07001275#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001276
1277/*
1278 * fd.io coding-style-patch-verification: ON
1279 *
1280 * Local Variables:
1281 * eval: (c-set-style "gnu")
1282 * End:
1283 */