/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

39
#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/
48
49
50/** \brief Translate buffer index into buffer pointer
51
52 @param vm - (vlib_main_t *) vlib main data structure pointer
53 @param buffer_index - (u32) buffer index
54 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040055*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070056always_inline vlib_buffer_t *
57vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
58{
Damjan Mariond1274cb2018-03-13 21:32:17 +010059 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020060 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
61 ASSERT (offset < bm->buffer_mem_size);
62
63 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070064}
65
Damjan Marionafe56de2018-05-17 12:44:00 +020066/** \brief Translate array of buffer indices into buffer pointers with offset
67
68 @param vm - (vlib_main_t *) vlib main data structure pointer
69 @param bi - (u32 *) array of buffer indices
70 @param b - (void **) array to store buffer pointers
71 @param count - (uword) number of elements
72 @param offset - (i32) offset applied to each pointer
73*/
74static_always_inline void
75vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
76 i32 offset)
77{
78#ifdef CLIB_HAVE_VEC256
79 u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
80 /* if count is not const, compiler will not unroll while loop
81 se we maintain two-in-parallel variant */
82 while (count >= 8)
83 {
84 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
85 u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
86 /* shift and add to get vlib_buffer_t pointer */
87 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
88 u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
89 b += 8;
90 bi += 8;
91 count -= 8;
92 }
93#endif
94 while (count >= 4)
95 {
96#ifdef CLIB_HAVE_VEC256
97 u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
98 /* shift and add to get vlib_buffer_t pointer */
99 u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
100#else
101 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
102 b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
103 b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
104 b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
105#endif
106 b += 4;
107 bi += 4;
108 count -= 4;
109 }
110 while (count)
111 {
112 b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
113 b += 1;
114 bi += 1;
115 count -= 1;
116 }
117}
118
119/** \brief Translate array of buffer indices into buffer pointers
120
121 @param vm - (vlib_main_t *) vlib main data structure pointer
122 @param bi - (u32 *) array of buffer indices
123 @param b - (vlib_buffer_t **) array to store buffer pointers
124 @param count - (uword) number of elements
125*/
126
127static_always_inline void
128vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
129{
130 vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
131}
132
Ed Warnickecb9cada2015-12-08 15:45:58 -0700133/** \brief Translate buffer pointer into buffer index
134
135 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400136 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700137 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400138*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200139
Ed Warnickecb9cada2015-12-08 15:45:58 -0700140always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400141vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700142{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100143 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200144 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
145 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
146 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400147 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700148 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
149}
150
Damjan Marionafe56de2018-05-17 12:44:00 +0200151/** \brief Translate array of buffer pointers into buffer indices with offset
152
153 @param vm - (vlib_main_t *) vlib main data structure pointer
154 @param b - (void **) array of buffer pointers
155 @param bi - (u32 *) array to store buffer indices
156 @param count - (uword) number of elements
157 @param offset - (i32) offset applied to each pointer
158*/
159static_always_inline void
160vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
161 uword count, i32 offset)
162{
163#ifdef CLIB_HAVE_VEC256
164 u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
165 u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);
166
167 while (count >= 8)
168 {
169 /* load 4 pointers into 256-bit register */
170 u64x4 v0 = u64x4_load_unaligned (b);
171 u64x4 v1 = u64x4_load_unaligned (b + 4);
172 u32x8 v2, v3;
173
174 v0 -= off4;
175 v1 -= off4;
176
177 v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
178 v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;
179
180 /* permute 256-bit register so lower u32s of each buffer index are
181 * placed into lower 128-bits */
182 v2 = u32x8_permute ((u32x8) v0, mask);
183 v3 = u32x8_permute ((u32x8) v1, mask);
184
185 /* extract lower 128-bits and save them to the array of buffer indices */
186 u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
187 u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
188 bi += 8;
189 b += 8;
190 count -= 8;
191 }
192#endif
193 while (count >= 4)
194 {
195 /* equivalent non-nector implementation */
196 bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
197 bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
198 bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
199 bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
200 bi += 4;
201 b += 4;
202 count -= 4;
203 }
204 while (count)
205 {
Zhiyong Yang462072a2018-05-30 22:12:57 -0400206 bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
Damjan Marionafe56de2018-05-17 12:44:00 +0200207 bi += 1;
208 b += 1;
209 count -= 1;
210 }
211}
212
213/** \brief Translate array of buffer pointers into buffer indices
214
215 @param vm - (vlib_main_t *) vlib main data structure pointer
216 @param b - (vlib_buffer_t **) array of buffer pointers
217 @param bi - (u32 *) array to store buffer indices
218 @param count - (uword) number of elements
219*/
220static_always_inline void
221vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
222 uword count)
223{
224 vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
225}
226
Ed Warnickecb9cada2015-12-08 15:45:58 -0700227/** \brief Get next buffer in buffer linklist, or zero for end of list.
228
229 @param vm - (vlib_main_t *) vlib main data structure pointer
230 @param b - (void *) buffer pointer
231 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400232*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700233always_inline vlib_buffer_t *
234vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
235{
236 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400237 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700238}
239
uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700242
243/** \brief Get length in bytes of the buffer chain
244
245 @param vm - (vlib_main_t *) vlib main data structure pointer
246 @param b - (void *) buffer pointer
247 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400248*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700249always_inline uword
250vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
251{
Damjan Marion072401e2017-07-13 18:53:27 +0200252 uword len = b->current_length;
253
254 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
255 return len;
256
257 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
258 return len + b->total_length_not_including_first_buffer;
259
260 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700261}
262
263/** \brief Get length in bytes of the buffer index buffer chain
264
265 @param vm - (vlib_main_t *) vlib main data structure pointer
266 @param bi - (u32) buffer index
267 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400268*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700269always_inline uword
270vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
271{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400272 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700273 return vlib_buffer_length_in_chain (vm, b);
274}
275
276/** \brief Copy buffer contents to memory
277
278 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400279 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700280 @param contents - (u8 *) memory, <strong>must be large enough</strong>
281 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400282*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700283always_inline uword
284vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
285{
286 uword content_len = 0;
287 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400288 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700289
290 while (1)
291 {
292 b = vlib_get_buffer (vm, buffer_index);
293 l = b->current_length;
Damjan Marionf1213b82016-03-13 02:22:06 +0100294 clib_memcpy (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700295 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400296 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700297 break;
298 buffer_index = b->next_buffer;
299 }
300
301 return content_len;
302}
303
304/* Return physical address of buffer->data start. */
305always_inline u64
306vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
307{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100308 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion149ba772017-10-12 13:09:26 +0200309 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
Damjan Marioncef87f12017-10-05 15:32:41 +0200310 vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
311 b->buffer_pool_index);
312
313 return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700314}
315
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
329
330#if 0
331/* Iterate over known allocated vlib bufs. You probably do not want
332 * to do this!
333 @param vm the vlib_main_t
334 @param bi found allocated buffer index
335 @param body operation to perform on buffer index
336 function executes body for each allocated buffer index
337 */
338#define vlib_buffer_foreach_allocated(vm,bi,body) \
339do { \
340 vlib_main_t * _vmain = (vm); \
341 vlib_buffer_main_t * _bmain = &_vmain->buffer_main; \
342 hash_pair_t * _vbpair; \
343 hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({ \
344 if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) { \
345 (bi) = _vbpair->key; \
346 body; \
347 } \
348 })); \
349} while (0)
350#endif
351
/* Allocation state of a buffer index as tracked in the
   buffer-known hash (used by alloc/free validation). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
361
void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);
366
Ed Warnickecb9cada2015-12-08 15:45:58 -0700367always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800368vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700369{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100370 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700371
Damjan Marion6b0f5892017-07-27 04:01:24 -0400372 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400373 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400374 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700375 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
376}
377
378always_inline void
Steven899a84b2018-01-29 20:09:09 -0800379vlib_buffer_set_known_state (u32 buffer_index,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700380 vlib_buffer_known_state_t state)
381{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100382 vlib_buffer_main_t *bm = &buffer_main;
Steven899a84b2018-01-29 20:09:09 -0800383
Damjan Marion6b0f5892017-07-27 04:01:24 -0400384 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700385 hash_set (bm->buffer_known_hash, buffer_index, state);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400386 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700387}
388
/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700393
Ed Warnickecb9cada2015-12-08 15:45:58 -0700394always_inline u32
395vlib_buffer_round_size (u32 size)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400396{
397 return round_pow2 (size, sizeof (vlib_buffer_t));
398}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700399
Damjan Mariondac03522018-02-01 15:30:13 +0100400always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200401vlib_buffer_get_free_list_index (vlib_buffer_t * b)
402{
Damjan Mariondac03522018-02-01 15:30:13 +0100403 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
404 return b->free_list_index;
405
406 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200407}
408
409always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100410vlib_buffer_set_free_list_index (vlib_buffer_t * b,
411 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200412{
Damjan Mariondac03522018-02-01 15:30:13 +0100413 if (PREDICT_FALSE (index))
414 {
415 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
416 b->free_list_index = index;
417 }
418 else
419 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200420}
421
Ed Warnickecb9cada2015-12-08 15:45:58 -0700422/** \brief Allocate buffers from specific freelist into supplied array
423
424 @param vm - (vlib_main_t *) vlib main data structure pointer
425 @param buffers - (u32 * ) buffer index array
426 @param n_buffers - (u32) number of buffers requested
Dave Barach9b8ffd92016-07-08 08:13:45 -0400427 @return - (u32) number of buffers actually allocated, may be
Ed Warnickecb9cada2015-12-08 15:45:58 -0700428 less than the number requested or zero
429*/
Damjan Marion878c6092017-01-04 13:19:27 +0100430always_inline u32
431vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
432 u32 * buffers,
Damjan Mariondac03522018-02-01 15:30:13 +0100433 u32 n_buffers,
434 vlib_buffer_free_list_index_t index)
Damjan Marion878c6092017-01-04 13:19:27 +0100435{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100436 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marionc8a26c62017-11-24 20:15:23 +0100437 vlib_buffer_free_list_t *fl;
438 u32 *src;
439 uword len;
Damjan Marion878c6092017-01-04 13:19:27 +0100440
Damjan Marionc8a26c62017-11-24 20:15:23 +0100441 ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);
Damjan Marion878c6092017-01-04 13:19:27 +0100442
Damjan Mariond1274cb2018-03-13 21:32:17 +0100443 fl = pool_elt_at_index (vm->buffer_free_list_pool, index);
Damjan Marionc8a26c62017-11-24 20:15:23 +0100444
445 len = vec_len (fl->buffers);
446
447 if (PREDICT_FALSE (len < n_buffers))
448 {
449 bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
Damjan Marionc22fcba2018-03-06 18:46:54 +0100450 if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
451 return 0;
Damjan Marionc8a26c62017-11-24 20:15:23 +0100452
453 /* even if fill free list didn't manage to refill free list
454 we should give what we have */
455 n_buffers = clib_min (len, n_buffers);
456
457 /* following code is intentionaly duplicated to allow compiler
458 to optimize fast path when n_buffers is constant value */
459 src = fl->buffers + len - n_buffers;
460 clib_memcpy (buffers, src, n_buffers * sizeof (u32));
461 _vec_len (fl->buffers) -= n_buffers;
462
463 /* Verify that buffers are known free. */
464 vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
465 VLIB_BUFFER_KNOWN_FREE);
466
467 return n_buffers;
468 }
469
470 src = fl->buffers + len - n_buffers;
471 clib_memcpy (buffers, src, n_buffers * sizeof (u32));
472 _vec_len (fl->buffers) -= n_buffers;
473
474 /* Verify that buffers are known free. */
475 vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
476 VLIB_BUFFER_KNOWN_FREE);
477
478 return n_buffers;
479}
480
481/** \brief Allocate buffers into supplied array
482
483 @param vm - (vlib_main_t *) vlib main data structure pointer
484 @param buffers - (u32 * ) buffer index array
485 @param n_buffers - (u32) number of buffers requested
486 @return - (u32) number of buffers actually allocated, may be
487 less than the number requested or zero
488*/
489always_inline u32
490vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
491{
492 return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
493 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
Damjan Marion878c6092017-01-04 13:19:27 +0100494}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700495
Damjan Marionc58408c2018-01-18 14:54:04 +0100496/** \brief Allocate buffers into ring
497
498 @param vm - (vlib_main_t *) vlib main data structure pointer
499 @param buffers - (u32 * ) buffer index ring
500 @param start - (u32) first slot in the ring
501 @param ring_size - (u32) ring size
502 @param n_buffers - (u32) number of buffers requested
503 @return - (u32) number of buffers actually allocated, may be
504 less than the number requested or zero
505*/
506always_inline u32
507vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
508 u32 ring_size, u32 n_buffers)
509{
510 u32 n_alloc;
511
512 ASSERT (n_buffers <= ring_size);
513
514 if (PREDICT_TRUE (start + n_buffers <= ring_size))
515 return vlib_buffer_alloc (vm, ring + start, n_buffers);
516
517 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
518
519 if (PREDICT_TRUE (n_alloc == ring_size - start))
520 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
521
522 return n_alloc;
523}
524
Ed Warnickecb9cada2015-12-08 15:45:58 -0700525/** \brief Free buffers
526 Frees the entire buffer chain for each buffer
527
528 @param vm - (vlib_main_t *) vlib main data structure pointer
529 @param buffers - (u32 * ) buffer index array
530 @param n_buffers - (u32) number of buffers to free
531
532*/
Damjan Marion878c6092017-01-04 13:19:27 +0100533always_inline void
534vlib_buffer_free (vlib_main_t * vm,
535 /* pointer to first buffer */
536 u32 * buffers,
537 /* number of buffers to free */
538 u32 n_buffers)
539{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100540 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100541
542 ASSERT (bm->cb.vlib_buffer_free_cb);
543
544 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
545}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700546
547/** \brief Free buffers, does not free the buffer chain for each buffer
548
549 @param vm - (vlib_main_t *) vlib main data structure pointer
550 @param buffers - (u32 * ) buffer index array
551 @param n_buffers - (u32) number of buffers to free
552
553*/
Damjan Marion878c6092017-01-04 13:19:27 +0100554always_inline void
555vlib_buffer_free_no_next (vlib_main_t * vm,
556 /* pointer to first buffer */
557 u32 * buffers,
558 /* number of buffers to free */
559 u32 n_buffers)
560{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100561 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100562
563 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
564
565 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
566}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700567
568/** \brief Free one buffer
Dave Barach9b8ffd92016-07-08 08:13:45 -0400569 Shorthand to free a single buffer chain.
Ed Warnickecb9cada2015-12-08 15:45:58 -0700570
571 @param vm - (vlib_main_t *) vlib main data structure pointer
572 @param buffer_index - (u32) buffer index to free
573*/
574always_inline void
575vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
576{
577 vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
578}
579
Damjan Mariona3731492018-02-25 22:50:39 +0100580/** \brief Free buffers from ring
581
582 @param vm - (vlib_main_t *) vlib main data structure pointer
583 @param buffers - (u32 * ) buffer index ring
584 @param start - (u32) first slot in the ring
585 @param ring_size - (u32) ring size
586 @param n_buffers - (u32) number of buffers
587*/
588always_inline void
589vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
590 u32 ring_size, u32 n_buffers)
591{
592 ASSERT (n_buffers <= ring_size);
593
594 if (PREDICT_TRUE (start + n_buffers <= ring_size))
595 {
596 vlib_buffer_free (vm, ring + start, n_buffers);
597 }
598 else
599 {
600 vlib_buffer_free (vm, ring + start, ring_size - start);
601 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
602 }
603}
604
Damjan Marioncef1db92018-03-28 18:27:38 +0200605/** \brief Free buffers from ring without freeing tail buffers
606
607 @param vm - (vlib_main_t *) vlib main data structure pointer
608 @param buffers - (u32 * ) buffer index ring
609 @param start - (u32) first slot in the ring
610 @param ring_size - (u32) ring size
611 @param n_buffers - (u32) number of buffers
612*/
613always_inline void
614vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
615 u32 ring_size, u32 n_buffers)
616{
617 ASSERT (n_buffers <= ring_size);
618
619 if (PREDICT_TRUE (start + n_buffers <= ring_size))
620 {
621 vlib_buffer_free (vm, ring + start, n_buffers);
622 }
623 else
624 {
625 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
626 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
627 }
628}
Damjan Mariona3731492018-02-25 22:50:39 +0100629
/* Add/delete buffer free lists. */
vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
							    u32 n_data_bytes,
							    char *fmt, ...);
Damjan Marion878c6092017-01-04 13:19:27 +0100634always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100635vlib_buffer_delete_free_list (vlib_main_t * vm,
636 vlib_buffer_free_list_index_t free_list_index)
Damjan Marion878c6092017-01-04 13:19:27 +0100637{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100638 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100639
640 ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);
641
642 bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
643}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700644
/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);
650
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100651always_inline vlib_buffer_free_list_t *
652vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100653 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100654{
Damjan Mariondac03522018-02-01 15:30:13 +0100655 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100656
Damjan Marion072401e2017-07-13 18:53:27 +0200657 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100658 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100659}
660
Ed Warnickecb9cada2015-12-08 15:45:58 -0700661always_inline vlib_buffer_free_list_t *
Damjan Mariondac03522018-02-01 15:30:13 +0100662vlib_buffer_get_free_list (vlib_main_t * vm,
663 vlib_buffer_free_list_index_t free_list_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700664{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400665 vlib_buffer_free_list_t *f;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700666
Damjan Mariond1274cb2018-03-13 21:32:17 +0100667 f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700668
669 /* Sanity: indices must match. */
670 ASSERT (f->index == free_list_index);
671
672 return f;
673}
674
675always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100676vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
677 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700678{
Damjan Mariondac03522018-02-01 15:30:13 +0100679 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700680 return f->n_data_bytes;
681}
682
void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700684
685/* Reasonably fast buffer copy routine. */
686always_inline void
687vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
688{
689 while (n >= 4)
690 {
691 dst[0] = src[0];
692 dst[1] = src[1];
693 dst[2] = src[2];
694 dst[3] = src[3];
695 dst += 4;
696 src += 4;
697 n -= 4;
698 }
699 while (n > 0)
700 {
701 dst[0] = src[0];
702 dst += 1;
703 src += 1;
704 n -= 1;
705 }
706}
707
/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  vlib_buffer_free_list_index_t free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700712
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100713/* duplicate all buffers in chain */
714always_inline vlib_buffer_t *
715vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
716{
717 vlib_buffer_t *s, *d, *fd;
718 uword n_alloc, n_buffers = 1;
Damjan Marion67655492016-11-15 12:50:28 +0100719 u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100720 int i;
721
722 s = b;
723 while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
724 {
725 n_buffers++;
726 s = vlib_get_buffer (vm, s->next_buffer);
727 }
Neale Ranns9d676af2017-03-15 01:28:31 -0700728 u32 new_buffers[n_buffers];
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100729
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100730 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
Dave Barach26cd8c12017-02-23 17:11:26 -0500731
732 /* No guarantee that we'll get all the buffers we asked for */
733 if (PREDICT_FALSE (n_alloc < n_buffers))
734 {
735 if (n_alloc > 0)
736 vlib_buffer_free (vm, new_buffers, n_alloc);
Dave Barach26cd8c12017-02-23 17:11:26 -0500737 return 0;
738 }
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100739
740 /* 1st segment */
741 s = b;
742 fd = d = vlib_get_buffer (vm, new_buffers[0]);
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100743 d->current_data = s->current_data;
744 d->current_length = s->current_length;
Damjan Marion67655492016-11-15 12:50:28 +0100745 d->flags = s->flags & flag_mask;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100746 d->total_length_not_including_first_buffer =
747 s->total_length_not_including_first_buffer;
748 clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
Damjan Mariondce05452016-12-01 11:59:33 +0100749 clib_memcpy (vlib_buffer_get_current (d),
750 vlib_buffer_get_current (s), s->current_length);
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100751
752 /* next segments */
753 for (i = 1; i < n_buffers; i++)
754 {
755 /* previous */
756 d->next_buffer = new_buffers[i];
757 /* current */
758 s = vlib_get_buffer (vm, s->next_buffer);
759 d = vlib_get_buffer (vm, new_buffers[i]);
760 d->current_data = s->current_data;
761 d->current_length = s->current_length;
762 clib_memcpy (vlib_buffer_get_current (d),
763 vlib_buffer_get_current (s), s->current_length);
Damjan Marion67655492016-11-15 12:50:28 +0100764 d->flags = s->flags & flag_mask;
Damjan Marion05ab8cb2016-11-03 20:16:04 +0100765 }
766
767 return fd;
768}
769
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* Cloning a buffer which is already cloned is not supported. */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  /* When the head data is small, full copies are cheaper than the
     clone bookkeeping below: hand out the source itself as clone 0
     and vlib_buffer_copy() the rest. */
  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;		/* copy alloc failed; i buffers produced */
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  /* A single "clone" is just the source buffer itself. */
  if (PREDICT_FALSE (n_buffers == 1))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  /* Allocate one head buffer per clone; may yield fewer than asked. */
  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  /* Each clone gets a private copy of the first head_end_offset bytes
     of the packet and then chains to the shared source buffer. */
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));

      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* The source now serves only as the shared tail: advance past the
     copied head bytes and record one reference per extra clone on
     every segment of the tail chain. */
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
850
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800851/** \brief Create multiple clones of buffer and store them
852 in the supplied array
853
854 @param vm - (vlib_main_t *) vlib main data structure pointer
855 @param src_buffer - (u32) source buffer index
856 @param buffers - (u32 * ) buffer index array
857 @param n_buffers - (u16) number of buffer clones requested (<=256)
858 @param head_end_offset - (u16) offset relative to current position
859 where packet head ends
860 @return - (u16) number of buffers actually cloned, may be
861 less than the number requested or zero
862*/
863always_inline u16
864vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
865 u16 n_buffers, u16 head_end_offset)
866{
867 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
868 u16 n_cloned = 0;
869
870 while (n_buffers > 256)
871 {
872 vlib_buffer_t *copy;
873 copy = vlib_buffer_copy (vm, s);
874 n_cloned += vlib_buffer_clone_256 (vm,
875 vlib_get_buffer_index (vm, copy),
876 (buffers + n_cloned),
877 256, head_end_offset);
878 n_buffers -= 256;
879 }
880 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
881 buffers + n_cloned,
882 n_buffers, head_end_offset);
883
884 return n_cloned;
885}
886
Damjan Marionc47ed032017-01-25 14:18:03 +0100887/** \brief Attach cloned tail to the buffer
888
889 @param vm - (vlib_main_t *) vlib main data structure pointer
890 @param head - (vlib_buffer_t *) head buffer
891 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
892*/
893
894always_inline void
895vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
896 vlib_buffer_t * tail)
897{
898 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200899 ASSERT (vlib_buffer_get_free_list_index (head) ==
900 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100901
902 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
903 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
904 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
905 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
906 head->next_buffer = vlib_get_buffer_index (vm, tail);
907 head->total_length_not_including_first_buffer = tail->current_length +
908 tail->total_length_not_including_first_buffer;
909
910next_segment:
911 __sync_add_and_fetch (&tail->n_add_refs, 1);
912
913 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
914 {
915 tail = vlib_get_buffer (vm, tail->next_buffer);
916 goto next_segment;
917 }
918}
919
Pierre Pfister328e99b2016-02-12 13:18:42 +0000920/* Initializes the buffer as an empty packet with no chained buffers. */
921always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400922vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000923{
924 first->total_length_not_including_first_buffer = 0;
925 first->current_length = 0;
926 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
927 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000928}
929
930/* The provided next_bi buffer index is appended to the end of the packet. */
931always_inline vlib_buffer_t *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400932vlib_buffer_chain_buffer (vlib_main_t * vm,
933 vlib_buffer_t * first,
934 vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000935{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400936 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000937 last->next_buffer = next_bi;
938 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
939 next_buffer->current_length = 0;
940 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000941 return next_buffer;
942}
943
944/* Increases or decreases the packet length.
945 * It does not allocate or deallocate new buffers.
946 * Therefore, the added length must be compatible
947 * with the last buffer. */
948always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400949vlib_buffer_chain_increase_length (vlib_buffer_t * first,
950 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000951{
952 last->current_length += len;
953 if (first != last)
954 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000955}
956
957/* Copy data to the end of the packet and increases its length.
958 * It does not allocate new buffers.
959 * Returns the number of copied bytes. */
960always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400961vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100962 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400963 vlib_buffer_t * first,
964 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000965{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400966 u32 n_buffer_bytes =
967 vlib_buffer_free_list_buffer_size (vm, free_list_index);
968 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
969 u16 len = clib_min (data_len,
970 n_buffer_bytes - last->current_length -
971 last->current_data);
972 clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
973 len);
974 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000975 return len;
976}
977
978/* Copy data to the end of the packet and increases its length.
979 * Allocates additional buffers from the free list if necessary.
980 * Returns the number of copied bytes.
981 * 'last' value is modified whenever new buffers are allocated and
982 * chained and points to the last buffer in the chain. */
983u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400984vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100985 vlib_buffer_free_list_index_t
986 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400987 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +0100988 vlib_buffer_t ** last, void *data,
989 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400990void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000991
Dave Barach9b8ffd92016-07-08 08:13:45 -0400992format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
993 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700994
/* Template for stamping out many identical packets: preformed packet
   data plus a cache of buffer indices to put copies in. */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Vector of free buffer indices available for template packets. */
  u32 *free_buffers;
} vlib_packet_template_t;
1008
1009void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
1010 vlib_packet_template_t * t);
1011
1012void vlib_packet_template_init (vlib_main_t * vm,
1013 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001014 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001015 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001016 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001017 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001018
Dave Barach9b8ffd92016-07-08 08:13:45 -04001019void *vlib_packet_template_get_packet (vlib_main_t * vm,
1020 vlib_packet_template_t * t,
1021 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001022
/* Free the packet data vector owned by the template.  The template
   struct itself is not freed; vm is accepted but not referenced. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1028
/* Return the total number of bytes available to unserialize: the
   unread remainder of the current stream chunk, the rest of the last
   buffer's chain, plus all buffer chains queued in the rx fifo. */
always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  /* Bytes not yet consumed from the current buffer. */
  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      /* Add the lengths of any segments chained after the last buffer. */
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* Add every buffer chain still queued for receive. */
  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}
1058
/* Set a buffer quickly into "uninitialized" state.  We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  /* Initialization values come from the free list's template buffer. */
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  /* Bulk-copy the [template_start, template_end) field range in one
     memcpy instead of assigning fields one at a time. */
  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not zeroed for some buffers. One option is to
   * uncomment the line lower (comes at a cost), the other, is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}
1101
/* Return one buffer to free list f, re-initializing it from the
   list's template when do_init is set.  When the list grows beyond
   4 frames worth of buffers, spill one frame back to the shared
   buffer pool under the pool spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1125
Ed Warnickecb9cada2015-12-08 15:45:58 -07001126#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001127extern u32 *vlib_buffer_state_validation_lock;
1128extern uword *vlib_buffer_state_validation_hash;
1129extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001130#endif
1131
/* Debug-build check that buffer b is in the 'expected' allocation
   state (non-zero = busy, zero = free) per the global validation
   hash; panics with a warning on mismatch.  Compiles to nothing in
   release builds. */
static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  /* Validation state lives on its own heap; switch to it while we
     touch the hash. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin on the test-and-set lock protecting the validation hash. */
  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      /* State mismatch: stop circular-journal capture, report, and die. */
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      /* Restore the caller's heap before warning/panicking. */
      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  /* Publish hash updates before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1175
/* Debug-build helper: unconditionally record buffer b's allocation
   state (non-zero = busy, zero = free) in the global validation hash.
   Compiles to nothing in release builds. */
static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  /* Validation state lives on its own heap. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin on the test-and-set lock protecting the validation hash. */
  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  /* Publish the hash update before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1194
Klement Sekera75e7d132017-09-20 08:26:30 +02001195/** minimum data size of first buffer in a buffer chain */
1196#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1197
/**
 * @brief compress buffer chain in a way where the first buffer is at least
 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 * @param[in,out] discard_vector - vector of buffer indexes which were removed
 * from the chain
 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_index_t index;
  vlib_buffer_free_list_t *free_list =
    vlib_buffer_get_buffer_free_list (vm, first, &index);

  /* Target head size, capped by the space the head buffer actually has. */
  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  /* Pull data forward from subsequent segments until the head holds
     want_first_size bytes or the chain ends. */
  do
    {
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      clib_memcpy (((u8 *) vlib_buffer_get_current (first)) +
		   first->current_length,
		   vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      vlib_buffer_advance (second, amount_to_copy);
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  /* Bytes moved into the head no longer count as "rest of chain". */
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      if (!second->current_length)
	{
	  /* Second segment fully drained: unlink it and hand it to the
	     caller for freeing via discard_vector. */
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
1256
Ed Warnickecb9cada2015-12-08 15:45:58 -07001257#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001258
1259/*
1260 * fd.io coding-style-patch-verification: ON
1261 *
1262 * Local Variables:
1263 * eval: (c-set-style "gnu")
1264 * End:
1265 */