blob: d8abdf31d79bd394a531dc0dece304deda274cb9 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
Dave Barachc3a06552018-10-01 09:25:32 -040044#include <vppinfra/fifo.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045
46/** \file
47 vlib buffer access methods.
48*/
49
50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond1274cb2018-03-13 21:32:17 +010060 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
/** \brief Translate array of buffer indices into buffer pointers with offset

    Vectorized where the platform provides 256-bit (AVX2) or 128-bit
    SIMD; otherwise falls back to scalar vlib_get_buffer() calls.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* Base address plus caller offset, replicated into all four lanes. */
  u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      /* widen 4 u32 indices to u64 lanes, twice per iteration */
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      /* 128-bit path: two u64x2 halves per group of four indices */
      u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      /* aarch64 has a direct high-half widen */
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      /* elsewhere: swap halves, then widen the (new) low half */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      /* scalar fallback, unrolled by four */
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* remainder, one at a time */
  while (count)
    {
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b += 1;
      bi += 1;
      count -= 1;
    }
}
131
/** \brief Translate array of buffer indices into buffer pointers

    Thin wrapper: vlib_get_buffers_with_offset() with zero offset.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}
145
Ed Warnickecb9cada2015-12-08 15:45:58 -0700146/** \brief Translate buffer pointer into buffer index
147
148 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400149 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400151*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200152
Ed Warnickecb9cada2015-12-08 15:45:58 -0700153always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400154vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700155{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100156 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200157 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
158 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
159 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400160 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
162}
163
/** \brief Translate array of buffer pointers into buffer indices with offset

    AVX2-accelerated when available; otherwise scalar
    vlib_get_buffer_index() calls.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* permutation that gathers the low u32 of each u64 lane into the
     low 128 bits */
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* note: subtracting (base - offset) == subtracting base, adding offset */
  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  /* remainder, one at a time */
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
225
/** \brief Translate array of buffer pointers into buffer indices

    Thin wrapper: vlib_get_buffer_indices_with_offset() with zero offset.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}
239
Ed Warnickecb9cada2015-12-08 15:45:58 -0700240/** \brief Get next buffer in buffer linklist, or zero for end of list.
241
242 @param vm - (vlib_main_t *) vlib main data structure pointer
243 @param b - (void *) buffer pointer
244 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246always_inline vlib_buffer_t *
247vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
248{
249 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400250 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251}
252
Dave Barach9b8ffd92016-07-08 08:13:45 -0400253uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
254 vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700255
256/** \brief Get length in bytes of the buffer chain
257
258 @param vm - (vlib_main_t *) vlib main data structure pointer
259 @param b - (void *) buffer pointer
260 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400261*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262always_inline uword
263vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
264{
Damjan Marion072401e2017-07-13 18:53:27 +0200265 uword len = b->current_length;
266
267 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
268 return len;
269
270 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
271 return len + b->total_length_not_including_first_buffer;
272
273 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700274}
275
276/** \brief Get length in bytes of the buffer index buffer chain
277
278 @param vm - (vlib_main_t *) vlib main data structure pointer
279 @param bi - (u32) buffer index
280 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400281*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700282always_inline uword
283vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
284{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400285 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700286 return vlib_buffer_length_in_chain (vm, b);
287}
288
289/** \brief Copy buffer contents to memory
290
291 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400292 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700293 @param contents - (u8 *) memory, <strong>must be large enough</strong>
294 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400295*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700296always_inline uword
297vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
298{
299 uword content_len = 0;
300 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700302
303 while (1)
304 {
305 b = vlib_get_buffer (vm, buffer_index);
306 l = b->current_length;
Damjan Marionf1213b82016-03-13 02:22:06 +0100307 clib_memcpy (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700308 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400309 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700310 break;
311 buffer_index = b->next_buffer;
312 }
313
314 return content_len;
315}
316
317/* Return physical address of buffer->data start. */
318always_inline u64
319vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
320{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100321 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion149ba772017-10-12 13:09:26 +0200322 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
Damjan Marioncef87f12017-10-05 15:32:41 +0200323 vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
324 b->buffer_pool_index);
325
326 return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700327}
328
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index.  do/while(0) wrapper makes the
   macro safe as a single statement; _b is a local so each argument is
   evaluated exactly once. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
342
343#if 0
344/* Iterate over known allocated vlib bufs. You probably do not want
345 * to do this!
346 @param vm the vlib_main_t
347 @param bi found allocated buffer index
348 @param body operation to perform on buffer index
349 function executes body for each allocated buffer index
350 */
351#define vlib_buffer_foreach_allocated(vm,bi,body) \
352do { \
353 vlib_main_t * _vmain = (vm); \
354 vlib_buffer_main_t * _bmain = &_vmain->buffer_main; \
355 hash_pair_t * _vbpair; \
356 hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({ \
357 if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) { \
358 (bi) = _vbpair->key; \
359 body; \
360 } \
361 })); \
362} while (0)
363#endif
364
/* Allocation state tracked per buffer index by the buffer
   validation (debug) machinery; see vlib_buffer_is_known() and
   vlib_buffer_set_known_state(). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
374
Damjan Marionc8a26c62017-11-24 20:15:23 +0100375void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
376 uword n_buffers,
377 vlib_buffer_known_state_t
378 expected_state);
379
Ed Warnickecb9cada2015-12-08 15:45:58 -0700380always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800381vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700382{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100383 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700384
Damjan Marion6b0f5892017-07-27 04:01:24 -0400385 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400386 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400387 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700388 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
389}
390
/* Record the allocation state of a buffer index in the buffer-known
   hash (validation/debug machinery).  Takes the hash spinlock, so it
   is safe to call concurrently with other lookups/updates. */
always_inline void
vlib_buffer_set_known_state (u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = &buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
401
402/* Validates sanity of a single buffer.
403 Returns format'ed vector with error message if any. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400404u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
405 uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700406
/* Round a requested buffer size up to the next multiple of
   sizeof (vlib_buffer_t). */
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700412
Damjan Mariondac03522018-02-01 15:30:13 +0100413always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200414vlib_buffer_get_free_list_index (vlib_buffer_t * b)
415{
Damjan Mariondac03522018-02-01 15:30:13 +0100416 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
417 return b->free_list_index;
418
419 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200420}
421
422always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100423vlib_buffer_set_free_list_index (vlib_buffer_t * b,
424 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200425{
Damjan Mariondac03522018-02-01 15:30:13 +0100426 if (PREDICT_FALSE (index))
427 {
428 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
429 b->free_list_index = index;
430 }
431 else
432 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200433}
434
/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param index - (vlib_buffer_free_list_index_t) free list to draw from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  /* Slow path: not enough cached buffers; ask the buffer manager
     callback to refill, then take whatever is available. */
  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* Fast path: pop n_buffers indices off the tail of the free vector. */
  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
493
/** \brief Allocate buffers into supplied array

    Convenience wrapper: allocates from the default free list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700508
Damjan Marionc58408c2018-01-18 14:54:04 +0100509/** \brief Allocate buffers into ring
510
511 @param vm - (vlib_main_t *) vlib main data structure pointer
512 @param buffers - (u32 * ) buffer index ring
513 @param start - (u32) first slot in the ring
514 @param ring_size - (u32) ring size
515 @param n_buffers - (u32) number of buffers requested
516 @return - (u32) number of buffers actually allocated, may be
517 less than the number requested or zero
518*/
519always_inline u32
520vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
521 u32 ring_size, u32 n_buffers)
522{
523 u32 n_alloc;
524
525 ASSERT (n_buffers <= ring_size);
526
527 if (PREDICT_TRUE (start + n_buffers <= ring_size))
528 return vlib_buffer_alloc (vm, ring + start, n_buffers);
529
530 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
531
532 if (PREDICT_TRUE (n_alloc == ring_size - start))
533 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
534
535 return n_alloc;
536}
537
Ed Warnickecb9cada2015-12-08 15:45:58 -0700538/** \brief Free buffers
539 Frees the entire buffer chain for each buffer
540
541 @param vm - (vlib_main_t *) vlib main data structure pointer
542 @param buffers - (u32 * ) buffer index array
543 @param n_buffers - (u32) number of buffers to free
544
545*/
Damjan Marion878c6092017-01-04 13:19:27 +0100546always_inline void
547vlib_buffer_free (vlib_main_t * vm,
548 /* pointer to first buffer */
549 u32 * buffers,
550 /* number of buffers to free */
551 u32 n_buffers)
552{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100553 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100554
555 ASSERT (bm->cb.vlib_buffer_free_cb);
556
557 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
558}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700559
560/** \brief Free buffers, does not free the buffer chain for each buffer
561
562 @param vm - (vlib_main_t *) vlib main data structure pointer
563 @param buffers - (u32 * ) buffer index array
564 @param n_buffers - (u32) number of buffers to free
565
566*/
Damjan Marion878c6092017-01-04 13:19:27 +0100567always_inline void
568vlib_buffer_free_no_next (vlib_main_t * vm,
569 /* pointer to first buffer */
570 u32 * buffers,
571 /* number of buffers to free */
572 u32 n_buffers)
573{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100574 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100575
576 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
577
578 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
579}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700580
/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
592
Damjan Mariona3731492018-02-25 22:50:39 +0100593/** \brief Free buffers from ring
594
595 @param vm - (vlib_main_t *) vlib main data structure pointer
596 @param buffers - (u32 * ) buffer index ring
597 @param start - (u32) first slot in the ring
598 @param ring_size - (u32) ring size
599 @param n_buffers - (u32) number of buffers
600*/
601always_inline void
602vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
603 u32 ring_size, u32 n_buffers)
604{
605 ASSERT (n_buffers <= ring_size);
606
607 if (PREDICT_TRUE (start + n_buffers <= ring_size))
608 {
609 vlib_buffer_free (vm, ring + start, n_buffers);
610 }
611 else
612 {
613 vlib_buffer_free (vm, ring + start, ring_size - start);
614 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
615 }
616}
617
Damjan Marioncef1db92018-03-28 18:27:38 +0200618/** \brief Free buffers from ring without freeing tail buffers
619
620 @param vm - (vlib_main_t *) vlib main data structure pointer
621 @param buffers - (u32 * ) buffer index ring
622 @param start - (u32) first slot in the ring
623 @param ring_size - (u32) ring size
624 @param n_buffers - (u32) number of buffers
625*/
626always_inline void
627vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
628 u32 ring_size, u32 n_buffers)
629{
630 ASSERT (n_buffers <= ring_size);
631
632 if (PREDICT_TRUE (start + n_buffers <= ring_size))
633 {
Damjan Marion4a973932018-06-09 19:29:16 +0200634 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200635 }
636 else
637 {
638 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
639 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
640 }
641}
Damjan Mariona3731492018-02-25 22:50:39 +0100642
Ed Warnickecb9cada2015-12-08 15:45:58 -0700643/* Add/delete buffer free lists. */
Damjan Mariondac03522018-02-01 15:30:13 +0100644vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
645 u32 n_data_bytes,
646 char *fmt, ...);
/* Delete a buffer free list via the registered buffer-manager
   callback.  Counterpart of vlib_buffer_create_free_list(). */
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = &buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700657
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100658/* Make sure we have at least given number of unaligned buffers. */
659void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
660 vlib_buffer_free_list_t *
661 free_list,
662 uword n_unaligned_buffers);
663
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100664always_inline vlib_buffer_free_list_t *
665vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100666 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100667{
Damjan Mariondac03522018-02-01 15:30:13 +0100668 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100669
Damjan Marion072401e2017-07-13 18:53:27 +0200670 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100671 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100672}
673
Ed Warnickecb9cada2015-12-08 15:45:58 -0700674always_inline vlib_buffer_free_list_t *
Damjan Mariondac03522018-02-01 15:30:13 +0100675vlib_buffer_get_free_list (vlib_main_t * vm,
676 vlib_buffer_free_list_index_t free_list_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700677{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400678 vlib_buffer_free_list_t *f;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700679
Damjan Mariond1274cb2018-03-13 21:32:17 +0100680 f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700681
682 /* Sanity: indices must match. */
683 ASSERT (f->index == free_list_index);
684
685 return f;
686}
687
/* Data size in bytes of buffers belonging to the given free list. */
always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
				   vlib_buffer_free_list_index_t index)
{
  vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
  return f->n_data_bytes;
}
695
Dave Barach9b8ffd92016-07-08 08:13:45 -0400696void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700697
698/* Reasonably fast buffer copy routine. */
699always_inline void
700vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
701{
702 while (n >= 4)
703 {
704 dst[0] = src[0];
705 dst[1] = src[1];
706 dst[2] = src[2];
707 dst[3] = src[3];
708 dst += 4;
709 src += 4;
710 n -= 4;
711 }
712 while (n > 0)
713 {
714 dst[0] = src[0];
715 dst += 1;
716 src += 1;
717 n -= 1;
718 }
719}
720
Ed Warnickecb9cada2015-12-08 15:45:58 -0700721/* Append given data to end of buffer, possibly allocating new buffers. */
722u32 vlib_buffer_add_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100723 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400724 u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700725
/* duplicate all buffers in chain */
/* Returns the head of the new chain, or 0 when allocation fails
   (any partially allocated buffers are freed).  Copies metadata,
   opaque fields and payload of every segment. */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  /* only these flags are propagated to the copies */
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* count segments in the source chain */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  /* NOTE(review): VLA sized by chain length — assumes chains are short
     enough not to risk stack overflow; confirm against callers. */
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  /* head segment also carries the cached chain total and opaque data */
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
783
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800784/** \brief Create a maximum of 256 clones of buffer and store them
785 in the supplied array
Damjan Marionc47ed032017-01-25 14:18:03 +0100786
787 @param vm - (vlib_main_t *) vlib main data structure pointer
788 @param src_buffer - (u32) source buffer index
789 @param buffers - (u32 * ) buffer index array
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800790 @param n_buffers - (u16) number of buffer clones requested (<=256)
Damjan Marionc47ed032017-01-25 14:18:03 +0100791 @param head_end_offset - (u16) offset relative to current position
792 where packet head ends
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800793 @return - (u16) number of buffers actually cloned, may be
Damjan Marionc47ed032017-01-25 14:18:03 +0100794 less than the number requested or zero
795*/
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800796always_inline u16
797vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
798 u16 n_buffers, u16 head_end_offset)
Damjan Marionc47ed032017-01-25 14:18:03 +0100799{
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800800 u16 i;
Damjan Marionc47ed032017-01-25 14:18:03 +0100801 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
802
803 ASSERT (s->n_add_refs == 0);
804 ASSERT (n_buffers);
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800805 ASSERT (n_buffers <= 256);
Damjan Marionc47ed032017-01-25 14:18:03 +0100806
807 if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
808 {
809 buffers[0] = src_buffer;
810 for (i = 1; i < n_buffers; i++)
811 {
812 vlib_buffer_t *d;
813 d = vlib_buffer_copy (vm, s);
814 if (d == 0)
815 return i;
816 buffers[i] = vlib_get_buffer_index (vm, d);
817
818 }
819 return n_buffers;
820 }
821
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800822 if (PREDICT_FALSE (n_buffers == 1))
Damjan Marionc47ed032017-01-25 14:18:03 +0100823 {
824 buffers[0] = src_buffer;
825 return 1;
826 }
827
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800828 n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
829 vlib_buffer_get_free_list_index
830 (s));
831
Damjan Marionc47ed032017-01-25 14:18:03 +0100832 for (i = 0; i < n_buffers; i++)
833 {
834 vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
835 d->current_data = s->current_data;
836 d->current_length = head_end_offset;
Damjan Marion072401e2017-07-13 18:53:27 +0200837 vlib_buffer_set_free_list_index (d,
838 vlib_buffer_get_free_list_index (s));
Yoann Desmouceaux1977a342018-05-29 13:38:44 +0200839
840 d->total_length_not_including_first_buffer = s->current_length -
Damjan Marionc47ed032017-01-25 14:18:03 +0100841 head_end_offset;
Yoann Desmouceaux1977a342018-05-29 13:38:44 +0200842 if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
843 {
844 d->total_length_not_including_first_buffer +=
845 s->total_length_not_including_first_buffer;
846 }
Damjan Marionc47ed032017-01-25 14:18:03 +0100847 d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
848 d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
849 clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
Neale Ranns5b8ed982018-07-23 05:30:12 -0400850 clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
Damjan Marionc47ed032017-01-25 14:18:03 +0100851 clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
852 head_end_offset);
853 d->next_buffer = src_buffer;
854 }
855 vlib_buffer_advance (s, head_end_offset);
856 s->n_add_refs = n_buffers - 1;
857 while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
858 {
859 s = vlib_get_buffer (vm, s->next_buffer);
860 s->n_add_refs = n_buffers - 1;
861 }
862
863 return n_buffers;
864}
865
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800866/** \brief Create multiple clones of buffer and store them
867 in the supplied array
868
869 @param vm - (vlib_main_t *) vlib main data structure pointer
870 @param src_buffer - (u32) source buffer index
871 @param buffers - (u32 * ) buffer index array
872 @param n_buffers - (u16) number of buffer clones requested (<=256)
873 @param head_end_offset - (u16) offset relative to current position
874 where packet head ends
875 @return - (u16) number of buffers actually cloned, may be
876 less than the number requested or zero
877*/
878always_inline u16
879vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
880 u16 n_buffers, u16 head_end_offset)
881{
882 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
883 u16 n_cloned = 0;
884
885 while (n_buffers > 256)
886 {
887 vlib_buffer_t *copy;
888 copy = vlib_buffer_copy (vm, s);
889 n_cloned += vlib_buffer_clone_256 (vm,
890 vlib_get_buffer_index (vm, copy),
891 (buffers + n_cloned),
892 256, head_end_offset);
893 n_buffers -= 256;
894 }
895 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
896 buffers + n_cloned,
897 n_buffers, head_end_offset);
898
899 return n_cloned;
900}
901
Damjan Marionc47ed032017-01-25 14:18:03 +0100902/** \brief Attach cloned tail to the buffer
903
904 @param vm - (vlib_main_t *) vlib main data structure pointer
905 @param head - (vlib_buffer_t *) head buffer
906 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
907*/
908
909always_inline void
910vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
911 vlib_buffer_t * tail)
912{
913 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200914 ASSERT (vlib_buffer_get_free_list_index (head) ==
915 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100916
917 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
918 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
919 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
920 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
921 head->next_buffer = vlib_get_buffer_index (vm, tail);
922 head->total_length_not_including_first_buffer = tail->current_length +
923 tail->total_length_not_including_first_buffer;
924
925next_segment:
926 __sync_add_and_fetch (&tail->n_add_refs, 1);
927
928 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
929 {
930 tail = vlib_get_buffer (vm, tail->next_buffer);
931 goto next_segment;
932 }
933}
934
Pierre Pfister328e99b2016-02-12 13:18:42 +0000935/* Initializes the buffer as an empty packet with no chained buffers. */
936always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400937vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000938{
939 first->total_length_not_including_first_buffer = 0;
940 first->current_length = 0;
941 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
942 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000943}
944
945/* The provided next_bi buffer index is appended to the end of the packet. */
946always_inline vlib_buffer_t *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400947vlib_buffer_chain_buffer (vlib_main_t * vm,
948 vlib_buffer_t * first,
949 vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000950{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400951 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000952 last->next_buffer = next_bi;
953 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
954 next_buffer->current_length = 0;
955 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000956 return next_buffer;
957}
958
959/* Increases or decreases the packet length.
960 * It does not allocate or deallocate new buffers.
961 * Therefore, the added length must be compatible
962 * with the last buffer. */
963always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400964vlib_buffer_chain_increase_length (vlib_buffer_t * first,
965 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000966{
967 last->current_length += len;
968 if (first != last)
969 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000970}
971
972/* Copy data to the end of the packet and increases its length.
973 * It does not allocate new buffers.
974 * Returns the number of copied bytes. */
975always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400976vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100977 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400978 vlib_buffer_t * first,
979 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000980{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400981 u32 n_buffer_bytes =
982 vlib_buffer_free_list_buffer_size (vm, free_list_index);
983 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
984 u16 len = clib_min (data_len,
985 n_buffer_bytes - last->current_length -
986 last->current_data);
987 clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
988 len);
989 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000990 return len;
991}
992
993/* Copy data to the end of the packet and increases its length.
994 * Allocates additional buffers from the free list if necessary.
995 * Returns the number of copied bytes.
996 * 'last' value is modified whenever new buffers are allocated and
997 * chained and points to the last buffer in the chain. */
998u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400999vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +01001000 vlib_buffer_free_list_index_t
1001 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001002 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +01001003 vlib_buffer_t ** last, void *data,
1004 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001005void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001006
Dave Barach9b8ffd92016-07-08 08:13:45 -04001007format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
1008 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001009
Dave Barach9b8ffd92016-07-08 08:13:45 -04001010typedef struct
1011{
Ed Warnickecb9cada2015-12-08 15:45:58 -07001012 /* Vector of packet data. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001013 u8 *packet_data;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001014
Damjan Mariond1274cb2018-03-13 21:32:17 +01001015 /* Number of buffers to allocate in each call to allocator. */
1016 u32 min_n_buffers_each_alloc;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001017
1018 /* Buffer free list for this template. */
Damjan Mariondac03522018-02-01 15:30:13 +01001019 vlib_buffer_free_list_index_t free_list_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001020
Dave Barach9b8ffd92016-07-08 08:13:45 -04001021 u32 *free_buffers;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001022} vlib_packet_template_t;
1023
1024void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
1025 vlib_packet_template_t * t);
1026
1027void vlib_packet_template_init (vlib_main_t * vm,
1028 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001029 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001030 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001031 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001032 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001033
Dave Barach9b8ffd92016-07-08 08:13:45 -04001034void *vlib_packet_template_get_packet (vlib_main_t * vm,
1035 vlib_packet_template_t * t,
1036 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001037
/* Release a packet template's packet-data vector ('vm' is unused).
   NOTE(review): t->free_buffers is not freed here — confirm whether the
   cached buffer indices are returned elsewhere before teardown. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1043
/* Count the bytes available to unserialize: the unread remainder of the
   current stream buffer, the tail segments of the last partially-read
   buffer chain, and every chain still queued in the rx buffer fifo. */
always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  /* the serialize stream stashes its vlib context in an opaque word */
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  /* unread bytes of the buffer currently being consumed */
  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      /* ~0 means "no buffer in progress"; otherwise add the remaining
         chained segments of the in-progress buffer */
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* plus every buffer chain still waiting in the rx fifo */
  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}
1073
Ed Warnickecb9cada2015-12-08 15:45:58 -07001074/* Set a buffer quickly into "uninitialized" state. We want this to
1075 be extremely cheap and arrange for all fields that need to be
1076 initialized to be in the first 128 bits of the buffer. */
1077always_inline void
Damjan Marionbd69a5f2017-02-05 23:44:42 +01001078vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001079 vlib_buffer_free_list_t * fl)
1080{
Damjan Marionbd69a5f2017-02-05 23:44:42 +01001081 vlib_buffer_t *src = &fl->buffer_init_template;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001082
Damjan Marion19010202016-03-24 17:17:47 +01001083 /* Make sure vlib_buffer_t is cacheline aligned and sized */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001084 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
1085 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
1086 CLIB_CACHE_LINE_BYTES);
1087 ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
1088 CLIB_CACHE_LINE_BYTES * 2);
Damjan Marion19010202016-03-24 17:17:47 +01001089
Ed Warnickecb9cada2015-12-08 15:45:58 -07001090 /* Make sure buffer template is sane. */
Damjan Marion072401e2017-07-13 18:53:27 +02001091 ASSERT (fl->index == vlib_buffer_get_free_list_index (src));
Ed Warnickecb9cada2015-12-08 15:45:58 -07001092
Dave Barachf8690282017-03-01 11:38:02 -05001093 clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
1094 STRUCT_MARK_PTR (src, template_start),
1095 STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
1096 STRUCT_OFFSET_OF (vlib_buffer_t, template_start));
1097
1098 /* Not in the first 16 octets. */
1099 dst->n_add_refs = src->n_add_refs;
Damjan Mariondac03522018-02-01 15:30:13 +01001100 vlib_buffer_set_free_list_index (dst, fl->index);
Dave Barachf8690282017-03-01 11:38:02 -05001101
Ed Warnickecb9cada2015-12-08 15:45:58 -07001102 /* Make sure it really worked. */
Dave Barachf8690282017-03-01 11:38:02 -05001103#define _(f) ASSERT (dst->f == src->f);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001104 _(current_data);
1105 _(current_length);
1106 _(flags);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001107#undef _
Florin Corasb2215d62017-08-01 16:56:58 -07001108 /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
1109 /* total_length_not_including_first_buffer is not in the template anymore
1110 * so it may actually not zeroed for some buffers. One option is to
1111 * uncomment the line lower (comes at a cost), the other, is to just not
1112 * care */
1113 /* dst->total_length_not_including_first_buffer = 0; */
Damjan Marionc47ed032017-01-25 14:18:03 +01001114 ASSERT (dst->n_add_refs == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001115}
1116
/* Return one buffer to a per-thread free list, optionally
   re-initializing it from the list's template.  When the local list
   grows past 4 frames, spill one frame's worth of the oldest entries
   back to the global buffer pool under its spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1140
Ed Warnickecb9cada2015-12-08 15:45:58 -07001141#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001142extern u32 *vlib_buffer_state_validation_lock;
1143extern uword *vlib_buffer_state_validation_hash;
1144extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001145#endif
1146
Dave Barach9b8ffd92016-07-08 08:13:45 -04001147static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001148vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
1149{
1150#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001151 uword *p;
1152 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001153
1154 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1155
1156 while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
1157 ;
1158
1159 p = hash_get (vlib_buffer_state_validation_hash, b);
1160
1161 /* If we don't know about b, declare it to be in the expected state */
1162 if (!p)
1163 {
1164 hash_set (vlib_buffer_state_validation_hash, b, expected);
1165 goto out;
1166 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001167
Ed Warnickecb9cada2015-12-08 15:45:58 -07001168 if (p[0] != expected)
1169 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001170 void cj_stop (void);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001171 u32 bi;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001172 vlib_main_t *vm = &vlib_global_main;
1173
1174 cj_stop ();
1175
Ed Warnickecb9cada2015-12-08 15:45:58 -07001176 bi = vlib_get_buffer_index (vm, b);
1177
1178 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001179 clib_warning ("%.6f buffer %llx (%d): %s, not %s",
1180 vlib_time_now (vm), bi,
1181 p[0] ? "busy" : "free", expected ? "busy" : "free");
1182 os_panic ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001183 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001184out:
1185 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001186 *vlib_buffer_state_validation_lock = 0;
1187 clib_mem_set_heap (oldheap);
1188#endif
1189}
1190
Dave Barach9b8ffd92016-07-08 08:13:45 -04001191static inline void
Ed Warnickecb9cada2015-12-08 15:45:58 -07001192vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
1193{
1194#if CLIB_DEBUG > 0
Dave Barach9b8ffd92016-07-08 08:13:45 -04001195 void *oldheap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001196
1197 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
1198
1199 while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
1200 ;
1201
1202 hash_set (vlib_buffer_state_validation_hash, b, expected);
1203
Dave Barach9b8ffd92016-07-08 08:13:45 -04001204 CLIB_MEMORY_BARRIER ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001205 *vlib_buffer_state_validation_lock = 0;
1206 clib_mem_set_heap (oldheap);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001207#endif
Ed Warnickecb9cada2015-12-08 15:45:58 -07001208}
1209
Klement Sekera75e7d132017-09-20 08:26:30 +02001210/** minimum data size of first buffer in a buffer chain */
1211#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1212
1213/**
1214 * @brief compress buffer chain in a way where the first buffer is at least
1215 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
1216 *
1217 * @param[in] vm - vlib_main
1218 * @param[in,out] first - first buffer in chain
1219 * @param[in,out] discard_vector - vector of buffer indexes which were removed
1220 * from the chain
1221 */
1222always_inline void
1223vlib_buffer_chain_compress (vlib_main_t * vm,
1224 vlib_buffer_t * first, u32 ** discard_vector)
1225{
1226 if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
1227 !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
1228 {
1229 /* this is already big enough or not a chain */
1230 return;
1231 }
1232 /* probe free list to find allocated buffer size to avoid overfill */
Damjan Mariondac03522018-02-01 15:30:13 +01001233 vlib_buffer_free_list_index_t index;
Klement Sekera75e7d132017-09-20 08:26:30 +02001234 vlib_buffer_free_list_t *free_list =
1235 vlib_buffer_get_buffer_free_list (vm, first, &index);
1236
1237 u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
1238 free_list->n_data_bytes -
1239 first->current_data);
1240 do
1241 {
1242 vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
1243 u32 need = want_first_size - first->current_length;
1244 u32 amount_to_copy = clib_min (need, second->current_length);
1245 clib_memcpy (((u8 *) vlib_buffer_get_current (first)) +
1246 first->current_length,
1247 vlib_buffer_get_current (second), amount_to_copy);
1248 first->current_length += amount_to_copy;
1249 vlib_buffer_advance (second, amount_to_copy);
1250 if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
1251 {
1252 first->total_length_not_including_first_buffer -= amount_to_copy;
1253 }
1254 if (!second->current_length)
1255 {
1256 vec_add1 (*discard_vector, first->next_buffer);
1257 if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
1258 {
1259 first->next_buffer = second->next_buffer;
1260 }
1261 else
1262 {
1263 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1264 }
1265 second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
1266 }
1267 }
1268 while ((first->current_length < want_first_size) &&
1269 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
1270}
1271
Ed Warnickecb9cada2015-12-08 15:45:58 -07001272#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001273
1274/*
1275 * fd.io coding-style-patch-verification: ON
1276 *
1277 * Local Variables:
1278 * eval: (c-set-style "gnu")
1279 * End:
1280 */