blob: 667063cd693ec88f46ec610bf0a63f158103be6f [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
44
45/** \file
46 vlib buffer access methods.
47*/
48
49
50/** \brief Translate buffer index into buffer pointer
51
52 @param vm - (vlib_main_t *) vlib main data structure pointer
53 @param buffer_index - (u32) buffer index
54 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040055*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070056always_inline vlib_buffer_t *
57vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
58{
Damjan Mariond1274cb2018-03-13 21:32:17 +010059 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020060 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
61 ASSERT (offset < bm->buffer_mem_size);
62
63 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070064}
65
/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* Pre-bias the arena base with the caller's offset so each lane only
     needs a shift and an add. */
  u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      /* Widen 4 u32 indices to u64 lanes per register. */
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128) && defined (__x86_64__)
      /* 128-bit variant: two u64 lanes per register; `off` is declared
         per-iteration here, the compiler is expected to hoist it. */
      u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
      /* Rotate the upper index pair into the lower lanes for the second
         widening step. */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      /* Scalar fallback, 4-wide unrolled. */
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* Remainder, one at a time. */
  while (count)
    {
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b += 1;
      bi += 1;
      count -= 1;
    }
}
126
127/** \brief Translate array of buffer indices into buffer pointers
128
129 @param vm - (vlib_main_t *) vlib main data structure pointer
130 @param bi - (u32 *) array of buffer indices
131 @param b - (vlib_buffer_t **) array to store buffer pointers
132 @param count - (uword) number of elements
133*/
134
135static_always_inline void
136vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
137{
138 vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
139}
140
Ed Warnickecb9cada2015-12-08 15:45:58 -0700141/** \brief Translate buffer pointer into buffer index
142
143 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400144 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700145 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400146*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200147
Ed Warnickecb9cada2015-12-08 15:45:58 -0700148always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400149vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100151 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200152 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
153 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
154 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400155 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700156 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
157}
158
/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* Permutation that gathers the low u32 of each u64 lane into the
     low 128 bits. */
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* Fold the caller's offset into the subtracted base so each pointer
     needs only a subtract and a shift. */
  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  /* Remainder, one at a time. */
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
220
221/** \brief Translate array of buffer pointers into buffer indices
222
223 @param vm - (vlib_main_t *) vlib main data structure pointer
224 @param b - (vlib_buffer_t **) array of buffer pointers
225 @param bi - (u32 *) array to store buffer indices
226 @param count - (uword) number of elements
227*/
228static_always_inline void
229vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
230 uword count)
231{
232 vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
233}
234
Ed Warnickecb9cada2015-12-08 15:45:58 -0700235/** \brief Get next buffer in buffer linklist, or zero for end of list.
236
237 @param vm - (vlib_main_t *) vlib main data structure pointer
238 @param b - (void *) buffer pointer
239 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400240*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700241always_inline vlib_buffer_t *
242vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
243{
244 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246}
247
/* Walk an entire chain and sum segment lengths; called by
   vlib_buffer_length_in_chain() when no cached total is usable. */
uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700250
/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  /* Fast path: single-segment buffer. */
  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  /* Chain with a valid cached total: no walk needed. */
  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  /* Otherwise walk the whole chain. */
  return vlib_buffer_length_in_chain_slow_path (vm, b);
}
270
271/** \brief Get length in bytes of the buffer index buffer chain
272
273 @param vm - (vlib_main_t *) vlib main data structure pointer
274 @param bi - (u32) buffer index
275 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400276*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700277always_inline uword
278vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
279{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400280 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700281 return vlib_buffer_length_in_chain (vm, b);
282}
283
/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  /* Walk the chain, appending each segment's payload to `contents`.
     The caller must have sized `contents` for the whole chain. */
  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      /* current_data may be negative (headroom rewind); data + current_data
         is always the start of valid payload. */
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
311
/* Return physical address of buffer->data start.
   Looks up the buffer's pool to find the physmem region the data
   lives in, then translates the virtual address. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
					       b->buffer_pool_index);

  return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
}
323
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index.  The underscore-prefixed local
   avoids capturing identifiers from the expansion site. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
337
/* NOTE(review): dead code, compiled out.  It references
   _vmain->buffer_main, which appears inconsistent with the rest of this
   file (buffer_main is used as a global here) — confirm before ever
   re-enabling. */
#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif
359
/** Allocation state tracked per buffer index in the buffer-known hash;
    used by vlib_buffer_is_known() / vlib_buffer_set_known_state(). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
369
/* Check that each index in `buffers` is in `expected_state` in the
   buffer-known hash (debug aid for alloc/free paths). */
void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);
374
/* Look up a buffer index in the buffer-known hash under the spinlock.
   Returns VLIB_BUFFER_UNKNOWN if the index has never been recorded. */
always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (u32 buffer_index)
{
  vlib_buffer_main_t *bm = &buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  /* NOTE(review): p is dereferenced after the lock is dropped; this
     presumably relies on the hash not being resized concurrently —
     confirm against the hash implementation. */
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}
385
/* Record the allocation state of a buffer index in the buffer-known
   hash, under the spinlock. */
always_inline void
vlib_buffer_set_known_state (u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = &buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
396
/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any.
   (NULL return means the buffer validated cleanly; optionally follows
   the next_buffer chain when follow_chain is non-zero.) */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700401
Ed Warnickecb9cada2015-12-08 15:45:58 -0700402always_inline u32
403vlib_buffer_round_size (u32 size)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400404{
405 return round_pow2 (size, sizeof (vlib_buffer_t));
406}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700407
Damjan Mariondac03522018-02-01 15:30:13 +0100408always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200409vlib_buffer_get_free_list_index (vlib_buffer_t * b)
410{
Damjan Mariondac03522018-02-01 15:30:13 +0100411 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
412 return b->free_list_index;
413
414 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200415}
416
417always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100418vlib_buffer_set_free_list_index (vlib_buffer_t * b,
419 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200420{
Damjan Mariondac03522018-02-01 15:30:13 +0100421 if (PREDICT_FALSE (index))
422 {
423 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
424 b->free_list_index = index;
425 }
426 else
427 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200428}
429
/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param index - (vlib_buffer_free_list_index_t) free list to draw from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  /* A buffer manager must have registered a fill callback. */
  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  /* Slow path: free list is short, ask the backend to refill it. */
  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* Fast path: pop n_buffers off the tail of the free list vector. */
  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
488
489/** \brief Allocate buffers into supplied array
490
491 @param vm - (vlib_main_t *) vlib main data structure pointer
492 @param buffers - (u32 * ) buffer index array
493 @param n_buffers - (u32) number of buffers requested
494 @return - (u32) number of buffers actually allocated, may be
495 less than the number requested or zero
496*/
497always_inline u32
498vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
499{
500 return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
501 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
Damjan Marion878c6092017-01-04 13:19:27 +0100502}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700503
/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
			   u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  /* Common case: the request fits without wrapping. */
  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  /* Wrapping case: fill to the end of the ring first ... */
  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  /* ... and only continue from slot 0 if the tail was fully filled,
     so the allocated region is always contiguous in ring order. */
  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}
532
Ed Warnickecb9cada2015-12-08 15:45:58 -0700533/** \brief Free buffers
534 Frees the entire buffer chain for each buffer
535
536 @param vm - (vlib_main_t *) vlib main data structure pointer
537 @param buffers - (u32 * ) buffer index array
538 @param n_buffers - (u32) number of buffers to free
539
540*/
Damjan Marion878c6092017-01-04 13:19:27 +0100541always_inline void
542vlib_buffer_free (vlib_main_t * vm,
543 /* pointer to first buffer */
544 u32 * buffers,
545 /* number of buffers to free */
546 u32 n_buffers)
547{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100548 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100549
550 ASSERT (bm->cb.vlib_buffer_free_cb);
551
552 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
553}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700554
555/** \brief Free buffers, does not free the buffer chain for each buffer
556
557 @param vm - (vlib_main_t *) vlib main data structure pointer
558 @param buffers - (u32 * ) buffer index array
559 @param n_buffers - (u32) number of buffers to free
560
561*/
Damjan Marion878c6092017-01-04 13:19:27 +0100562always_inline void
563vlib_buffer_free_no_next (vlib_main_t * vm,
564 /* pointer to first buffer */
565 u32 * buffers,
566 /* number of buffers to free */
567 u32 n_buffers)
568{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100569 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100570
571 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
572
573 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
574}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700575
576/** \brief Free one buffer
Dave Barach9b8ffd92016-07-08 08:13:45 -0400577 Shorthand to free a single buffer chain.
Ed Warnickecb9cada2015-12-08 15:45:58 -0700578
579 @param vm - (vlib_main_t *) vlib main data structure pointer
580 @param buffer_index - (u32) buffer index to free
581*/
582always_inline void
583vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
584{
585 vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
586}
587
Damjan Mariona3731492018-02-25 22:50:39 +0100588/** \brief Free buffers from ring
589
590 @param vm - (vlib_main_t *) vlib main data structure pointer
591 @param buffers - (u32 * ) buffer index ring
592 @param start - (u32) first slot in the ring
593 @param ring_size - (u32) ring size
594 @param n_buffers - (u32) number of buffers
595*/
596always_inline void
597vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
598 u32 ring_size, u32 n_buffers)
599{
600 ASSERT (n_buffers <= ring_size);
601
602 if (PREDICT_TRUE (start + n_buffers <= ring_size))
603 {
604 vlib_buffer_free (vm, ring + start, n_buffers);
605 }
606 else
607 {
608 vlib_buffer_free (vm, ring + start, ring_size - start);
609 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
610 }
611}
612
Damjan Marioncef1db92018-03-28 18:27:38 +0200613/** \brief Free buffers from ring without freeing tail buffers
614
615 @param vm - (vlib_main_t *) vlib main data structure pointer
616 @param buffers - (u32 * ) buffer index ring
617 @param start - (u32) first slot in the ring
618 @param ring_size - (u32) ring size
619 @param n_buffers - (u32) number of buffers
620*/
621always_inline void
622vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
623 u32 ring_size, u32 n_buffers)
624{
625 ASSERT (n_buffers <= ring_size);
626
627 if (PREDICT_TRUE (start + n_buffers <= ring_size))
628 {
Damjan Marion4a973932018-06-09 19:29:16 +0200629 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200630 }
631 else
632 {
633 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
634 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
635 }
636}
Damjan Mariona3731492018-02-25 22:50:39 +0100637
/* Add/delete buffer free lists.
   Creates a free list with the given per-buffer data size; fmt/... form
   the list's name.  Returns the new list's index. */
vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
							    u32 n_data_bytes,
							    char *fmt, ...);
Damjan Marion878c6092017-01-04 13:19:27 +0100642always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100643vlib_buffer_delete_free_list (vlib_main_t * vm,
644 vlib_buffer_free_list_index_t free_list_index)
Damjan Marion878c6092017-01-04 13:19:27 +0100645{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100646 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100647
648 ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);
649
650 bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
651}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700652
/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);
658
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100659always_inline vlib_buffer_free_list_t *
660vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100661 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100662{
Damjan Mariondac03522018-02-01 15:30:13 +0100663 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100664
Damjan Marion072401e2017-07-13 18:53:27 +0200665 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100666 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100667}
668
/** \brief Look up a free list by index.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param free_list_index - (vlib_buffer_free_list_index_t) list index
    @return - (vlib_buffer_free_list_t *) the free list
*/
always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm,
			   vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}
682
683always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100684vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
685 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700686{
Damjan Mariondac03522018-02-01 15:30:13 +0100687 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700688 return f->n_data_bytes;
689}
690
/* Cache-line-aligned bulk copy helper (defined in buffer.c). */
void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700692
693/* Reasonably fast buffer copy routine. */
694always_inline void
695vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
696{
697 while (n >= 4)
698 {
699 dst[0] = src[0];
700 dst[1] = src[1];
701 dst[2] = src[2];
702 dst[3] = src[3];
703 dst += 4;
704 src += 4;
705 n -= 4;
706 }
707 while (n > 0)
708 {
709 dst[0] = src[0];
710 dst += 1;
711 src += 1;
712 n -= 1;
713 }
714}
715
/* Append given data to end of buffer, possibly allocating new buffers.
   Returns the (possibly unchanged) index of the chain's first buffer. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  vlib_buffer_free_list_index_t free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700720
/* duplicate all buffers in chain
   Deep-copies a whole chain into freshly allocated buffers: metadata
   (current_data/length, opaque, opaque2) and payload are copied; only
   NEXT_PRESENT and TOTAL_LENGTH_VALID flags are propagated.
   Returns the head of the new chain, or 0 if allocation fails
   (nothing is leaked in that case). */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* Count segments in the source chain. */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  /* VLA sized by chain length — assumes chains are short enough for
     the stack. */
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment: also carries the chain-wide total and opaque metadata. */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
778
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* Source must not already be shared: n_add_refs is (re)set below. */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  /* Tiny head: cloning would save little, so fall back to full copies.
     Slot 0 reuses the source buffer itself; on copy failure return the
     count produced so far (may be < n_buffers). */
  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  /* Single "clone" is just the source buffer itself. */
  if (PREDICT_FALSE (n_buffers == 1))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  /* Allocate the head buffers; may return fewer than requested, in
     which case we simply clone fewer. */
  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  /* Each clone gets a private copy of the packet head
     (head_end_offset bytes) and then chains to the shared source. */
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));

      /* Tail length seen from the clone: rest of the source's first
         segment plus, for chained sources, the source's own tail total. */
      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* The head bytes now live in the clones: trim them off the shared
     source and reference-count every segment of the shared tail. */
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
860
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800861/** \brief Create multiple clones of buffer and store them
862 in the supplied array
863
864 @param vm - (vlib_main_t *) vlib main data structure pointer
865 @param src_buffer - (u32) source buffer index
866 @param buffers - (u32 * ) buffer index array
867 @param n_buffers - (u16) number of buffer clones requested (<=256)
868 @param head_end_offset - (u16) offset relative to current position
869 where packet head ends
870 @return - (u16) number of buffers actually cloned, may be
871 less than the number requested or zero
872*/
873always_inline u16
874vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
875 u16 n_buffers, u16 head_end_offset)
876{
877 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
878 u16 n_cloned = 0;
879
880 while (n_buffers > 256)
881 {
882 vlib_buffer_t *copy;
883 copy = vlib_buffer_copy (vm, s);
884 n_cloned += vlib_buffer_clone_256 (vm,
885 vlib_get_buffer_index (vm, copy),
886 (buffers + n_cloned),
887 256, head_end_offset);
888 n_buffers -= 256;
889 }
890 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
891 buffers + n_cloned,
892 n_buffers, head_end_offset);
893
894 return n_cloned;
895}
896
Damjan Marionc47ed032017-01-25 14:18:03 +0100897/** \brief Attach cloned tail to the buffer
898
899 @param vm - (vlib_main_t *) vlib main data structure pointer
900 @param head - (vlib_buffer_t *) head buffer
901 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
902*/
903
904always_inline void
905vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
906 vlib_buffer_t * tail)
907{
908 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200909 ASSERT (vlib_buffer_get_free_list_index (head) ==
910 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100911
912 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
913 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
914 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
915 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
916 head->next_buffer = vlib_get_buffer_index (vm, tail);
917 head->total_length_not_including_first_buffer = tail->current_length +
918 tail->total_length_not_including_first_buffer;
919
920next_segment:
921 __sync_add_and_fetch (&tail->n_add_refs, 1);
922
923 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
924 {
925 tail = vlib_get_buffer (vm, tail->next_buffer);
926 goto next_segment;
927 }
928}
929
Pierre Pfister328e99b2016-02-12 13:18:42 +0000930/* Initializes the buffer as an empty packet with no chained buffers. */
931always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400932vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000933{
934 first->total_length_not_including_first_buffer = 0;
935 first->current_length = 0;
936 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
937 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000938}
939
940/* The provided next_bi buffer index is appended to the end of the packet. */
941always_inline vlib_buffer_t *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400942vlib_buffer_chain_buffer (vlib_main_t * vm,
943 vlib_buffer_t * first,
944 vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000945{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400946 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000947 last->next_buffer = next_bi;
948 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
949 next_buffer->current_length = 0;
950 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000951 return next_buffer;
952}
953
954/* Increases or decreases the packet length.
955 * It does not allocate or deallocate new buffers.
956 * Therefore, the added length must be compatible
957 * with the last buffer. */
958always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400959vlib_buffer_chain_increase_length (vlib_buffer_t * first,
960 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000961{
962 last->current_length += len;
963 if (first != last)
964 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000965}
966
967/* Copy data to the end of the packet and increases its length.
968 * It does not allocate new buffers.
969 * Returns the number of copied bytes. */
970always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400971vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100972 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400973 vlib_buffer_t * first,
974 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000975{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400976 u32 n_buffer_bytes =
977 vlib_buffer_free_list_buffer_size (vm, free_list_index);
978 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
979 u16 len = clib_min (data_len,
980 n_buffer_bytes - last->current_length -
981 last->current_data);
982 clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
983 len);
984 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000985 return len;
986}
987
988/* Copy data to the end of the packet and increases its length.
989 * Allocates additional buffers from the free list if necessary.
990 * Returns the number of copied bytes.
991 * 'last' value is modified whenever new buffers are allocated and
992 * chained and points to the last buffer in the chain. */
993u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400994vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100995 vlib_buffer_free_list_index_t
996 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400997 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +0100998 vlib_buffer_t ** last, void *data,
999 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001000void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001001
Dave Barach9b8ffd92016-07-08 08:13:45 -04001002format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
1003 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001004
/* Template for stamping out many copies of the same packet; see
   vlib_packet_template_init / vlib_packet_template_get_packet. */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Pre-allocated buffer indices -- presumably a per-template cache
     refilled by the allocator helper; TODO confirm against
     vlib_packet_template_get_packet_helper. */
  u32 *free_buffers;
} vlib_packet_template_t;
1018
1019void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
1020 vlib_packet_template_t * t);
1021
1022void vlib_packet_template_init (vlib_main_t * vm,
1023 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001024 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001025 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001026 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001027 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001028
Dave Barach9b8ffd92016-07-08 08:13:45 -04001029void *vlib_packet_template_get_packet (vlib_main_t * vm,
1030 vlib_packet_template_t * t,
1031 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001032
/* Release the template's packet-data vector. 'vm' is unused but kept
   for API symmetry with the other packet-template functions. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1038
/* Count the bytes available to the unserializer: the unread remainder
   of the current stream buffer, plus the tail segments of the last
   partially-consumed buffer chain, plus every chain still queued in the
   rx buffer fifo. */
always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  /* The buffer-serialization state is stashed in the stream's opaque
     data-function field. */
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  /* Unconsumed bytes in the buffer currently being read. */
  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      /* Add the not-yet-visited tail segments of the last chain. */
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* Add every buffer chain still waiting in the rx fifo. */
  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}
1068
/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer.

   Copies the free list's init template over dst's template region in a
   single memcpy, then fixes up the few fields outside that region. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  /* Bulk-copy the [template_start, template_end) region in one shot. */
  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not zeroed for some buffers. One option is to
   * uncomment the line lower (comes at a cost), the other, is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}
1111
/* Return one buffer to free list 'f', optionally re-initializing it
   from the free list's template. When the per-list cache grows past
   4 frames, spill one frame's worth of the OLDEST entries back to the
   shared buffer pool under the pool spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1135
Ed Warnickecb9cada2015-12-08 15:45:58 -07001136#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001137extern u32 *vlib_buffer_state_validation_lock;
1138extern uword *vlib_buffer_state_validation_hash;
1139extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001140#endif
1141
/* Debug-only check that buffer 'b' is in the 'expected' busy/free state,
   as recorded in the global validation hash. Panics on mismatch.
   Compiles to an empty function unless CLIB_DEBUG > 0. */
static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  /* Validation state lives on its own heap. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin on the test-and-set lock protecting the validation hash. */
  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      /* Freeze the circular-journal trace before reporting. */
      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      /* NOTE(review): format has five specifiers but only four
         arguments appear to be supplied -- verify against the clib
         format conventions. */
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  /* Publish hash updates before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1185
/* Debug-only: unconditionally record buffer 'b' as being in the
   'expected' busy/free state in the global validation hash.
   Compiles to an empty function unless CLIB_DEBUG > 0. */
static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  /* Validation state lives on its own heap. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin on the test-and-set lock protecting the validation hash. */
  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  /* Publish the hash update before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1204
/** minimum data size of first buffer in a buffer chain */
#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)

/**
 * @brief compress buffer chain in a way where the first buffer is at least
 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 * @param[in,out] discard_vector - vector of buffer indexes which were removed
 * from the chain
 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_index_t index;
  vlib_buffer_free_list_t *free_list =
    vlib_buffer_get_buffer_free_list (vm, first, &index);

  /* Target first-buffer size, capped by the space actually available
     behind current_data in this buffer. */
  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  do
    {
      /* Pull bytes from the second segment into the first. */
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      clib_memcpy (((u8 *) vlib_buffer_get_current (first)) +
		   first->current_length,
		   vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      vlib_buffer_advance (second, amount_to_copy);
      /* Bytes moved into the first buffer no longer count toward the
         "not including first buffer" total. */
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      if (!second->current_length)
	{
	  /* Second segment fully drained: unlink it and hand its index
	     to the caller for freeing. Clearing its NEXT_PRESENT bit
	     keeps the free path from chain-freeing its successors. */
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
1266
Ed Warnickecb9cada2015-12-08 15:45:58 -07001267#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001268
1269/*
1270 * fd.io coding-style-patch-verification: ON
1271 *
1272 * Local Variables:
1273 * eval: (c-set-style "gnu")
1274 * End:
1275 */