blob: 54fc1f61598701a046fef882d5f6b4bcc1dfc33c [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
Dave Barachc3a06552018-10-01 09:25:32 -040044#include <vppinfra/fifo.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045
46/** \file
47 vlib buffer access methods.
48*/
49
50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond1274cb2018-03-13 21:32:17 +010060 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
/** \brief Translate array of buffer indices into buffer pointers with offset

    Vectorized (AVX2 / SSE-NEON) where available, with a scalar fallback.
    All variants compute: b[i] = buffer_mem_start + (bi[i] << cache-line-log2)
    + offset.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* fold the constant base + offset into one splatted vector */
  u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      /* widen 4 x u32 indices to 4 x u64 so they can be shifted to offsets */
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      /* low two indices widened to u64 */
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      /* aarch64 can widen the high half directly */
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      /* otherwise swap halves and widen the (new) low half */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      /* scalar 4-wide fallback */
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* remaining 0..3 elements, one at a time */
  while (count)
    {
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b += 1;
      bi += 1;
      count -= 1;
    }
}
131
/** \brief Translate array of buffer indices into buffer pointers

    Convenience wrapper around vlib_get_buffers_with_offset() with a
    zero offset, i.e. pointers to the vlib_buffer_t headers themselves.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}
145
Ed Warnickecb9cada2015-12-08 15:45:58 -0700146/** \brief Translate buffer pointer into buffer index
147
148 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400149 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400151*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200152
Ed Warnickecb9cada2015-12-08 15:45:58 -0700153always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400154vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700155{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100156 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200157 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
158 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
159 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400160 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
162}
163
/** \brief Translate array of buffer pointers into buffer indices with offset

    Inverse of vlib_get_buffers_with_offset(): for each pointer,
    bi[i] = ((b[i] + offset) - buffer_mem_start) >> cache-line-log2.
    Uses an AVX2 fast path where available, scalar otherwise.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* permutation that gathers the low u32 of each u64 lane into the
     low 128 bits */
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* subtracting (base - offset) is equivalent to (ptr + offset) - base */
  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  /* remaining 0..3 elements */
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
225
/** \brief Translate array of buffer pointers into buffer indices

    Convenience wrapper around vlib_get_buffer_indices_with_offset()
    with a zero offset.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}
239
Ed Warnickecb9cada2015-12-08 15:45:58 -0700240/** \brief Get next buffer in buffer linklist, or zero for end of list.
241
242 @param vm - (vlib_main_t *) vlib main data structure pointer
243 @param b - (void *) buffer pointer
244 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246always_inline vlib_buffer_t *
247vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
248{
249 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400250 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251}
252
Dave Barach9b8ffd92016-07-08 08:13:45 -0400253uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
254 vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700255
256/** \brief Get length in bytes of the buffer chain
257
258 @param vm - (vlib_main_t *) vlib main data structure pointer
259 @param b - (void *) buffer pointer
260 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400261*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262always_inline uword
263vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
264{
Damjan Marion072401e2017-07-13 18:53:27 +0200265 uword len = b->current_length;
266
267 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
268 return len;
269
270 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
271 return len + b->total_length_not_including_first_buffer;
272
273 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700274}
275
/** \brief Get length in bytes of the buffer index buffer chain

    Looks up the buffer by index and delegates to
    vlib_buffer_length_in_chain().

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}
288
289/** \brief Copy buffer contents to memory
290
291 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400292 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700293 @param contents - (u8 *) memory, <strong>must be large enough</strong>
294 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400295*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700296always_inline uword
297vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
298{
299 uword content_len = 0;
300 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700302
303 while (1)
304 {
305 b = vlib_get_buffer (vm, buffer_index);
306 l = b->current_length;
Dave Barach178cf492018-11-13 16:34:13 -0500307 clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700308 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400309 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700310 break;
311 buffer_index = b->next_buffer;
312 }
313
314 return content_len;
315}
316
/** \brief Get physical address of the start of buffer data
    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (uword) physical address of b->data
*/
always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}
322
/** \brief Get physical address of the current data position in a buffer
    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (uword) physical address of b->data + b->current_data
*/
always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}
328
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index.
   do/while(0) wrapper makes the macro a single statement. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
342
/** Allocation state of a buffer as tracked by the known-buffer hash
    (used by debug validation of alloc/free). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
352
Damjan Marionc8a26c62017-11-24 20:15:23 +0100353void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
354 uword n_buffers,
355 vlib_buffer_known_state_t
356 expected_state);
357
Ed Warnickecb9cada2015-12-08 15:45:58 -0700358always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800359vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700360{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100361 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700362
Damjan Marion6b0f5892017-07-27 04:01:24 -0400363 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400364 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400365 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700366 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
367}
368
/** \brief Record the known allocation state of a buffer index.

    Inserts/updates buffer_index in the known-buffer hash under the
    spinlock (debug validation support).

    @param buffer_index - (u32) buffer index
    @param state - (vlib_buffer_known_state_t) state to record
*/
always_inline void
vlib_buffer_set_known_state (u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = &buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
379
380/* Validates sanity of a single buffer.
381 Returns format'ed vector with error message if any. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400382u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
383 uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700384
/** \brief Round a byte size up to a multiple of sizeof (vlib_buffer_t).
    @param size - (u32) requested size in bytes
    @return - (u32) size rounded up
*/
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700390
Damjan Mariondac03522018-02-01 15:30:13 +0100391always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200392vlib_buffer_get_free_list_index (vlib_buffer_t * b)
393{
Damjan Mariondac03522018-02-01 15:30:13 +0100394 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
395 return b->free_list_index;
396
397 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200398}
399
400always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100401vlib_buffer_set_free_list_index (vlib_buffer_t * b,
402 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200403{
Damjan Mariondac03522018-02-01 15:30:13 +0100404 if (PREDICT_FALSE (index))
405 {
406 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
407 b->free_list_index = index;
408 }
409 else
410 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200411}
412
/** \brief Allocate buffers from specific freelist into supplied array

    Pops indices off the tail of the free list's buffer vector, asking
    the registered fill callback to replenish the list if it is short.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      /* try to refill; if the list is still empty, give up */
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* fast path: free list already holds enough buffers */
  src = fl->buffers + len - n_buffers;
  clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
471
/** \brief Allocate buffers into supplied array

    Wrapper around vlib_buffer_alloc_from_free_list() using the
    default free list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700486
Damjan Marionc58408c2018-01-18 14:54:04 +0100487/** \brief Allocate buffers into ring
488
489 @param vm - (vlib_main_t *) vlib main data structure pointer
490 @param buffers - (u32 * ) buffer index ring
491 @param start - (u32) first slot in the ring
492 @param ring_size - (u32) ring size
493 @param n_buffers - (u32) number of buffers requested
494 @return - (u32) number of buffers actually allocated, may be
495 less than the number requested or zero
496*/
497always_inline u32
498vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
499 u32 ring_size, u32 n_buffers)
500{
501 u32 n_alloc;
502
503 ASSERT (n_buffers <= ring_size);
504
505 if (PREDICT_TRUE (start + n_buffers <= ring_size))
506 return vlib_buffer_alloc (vm, ring + start, n_buffers);
507
508 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
509
510 if (PREDICT_TRUE (n_alloc == ring_size - start))
511 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
512
513 return n_alloc;
514}
515
/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    Delegates to the registered free callback.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = &buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700537
/** \brief Free buffers, does not free the buffer chain for each buffer

    Delegates to the registered no-next free callback; chained segments
    referenced via next_buffer are NOT freed.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = &buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700558
/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
570
Damjan Mariona3731492018-02-25 22:50:39 +0100571/** \brief Free buffers from ring
572
573 @param vm - (vlib_main_t *) vlib main data structure pointer
574 @param buffers - (u32 * ) buffer index ring
575 @param start - (u32) first slot in the ring
576 @param ring_size - (u32) ring size
577 @param n_buffers - (u32) number of buffers
578*/
579always_inline void
580vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
581 u32 ring_size, u32 n_buffers)
582{
583 ASSERT (n_buffers <= ring_size);
584
585 if (PREDICT_TRUE (start + n_buffers <= ring_size))
586 {
587 vlib_buffer_free (vm, ring + start, n_buffers);
588 }
589 else
590 {
591 vlib_buffer_free (vm, ring + start, ring_size - start);
592 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
593 }
594}
595
Damjan Marioncef1db92018-03-28 18:27:38 +0200596/** \brief Free buffers from ring without freeing tail buffers
597
598 @param vm - (vlib_main_t *) vlib main data structure pointer
599 @param buffers - (u32 * ) buffer index ring
600 @param start - (u32) first slot in the ring
601 @param ring_size - (u32) ring size
602 @param n_buffers - (u32) number of buffers
603*/
604always_inline void
605vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
606 u32 ring_size, u32 n_buffers)
607{
608 ASSERT (n_buffers <= ring_size);
609
610 if (PREDICT_TRUE (start + n_buffers <= ring_size))
611 {
Damjan Marion4a973932018-06-09 19:29:16 +0200612 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200613 }
614 else
615 {
616 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
617 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
618 }
619}
Damjan Mariona3731492018-02-25 22:50:39 +0100620
Ed Warnickecb9cada2015-12-08 15:45:58 -0700621/* Add/delete buffer free lists. */
Damjan Mariondac03522018-02-01 15:30:13 +0100622vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
623 u32 n_data_bytes,
624 char *fmt, ...);
/** \brief Delete a buffer free list.

    Delegates to the registered delete callback.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param free_list_index - (vlib_buffer_free_list_index_t) list to delete
*/
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = &buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700635
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100636/* Make sure we have at least given number of unaligned buffers. */
637void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
638 vlib_buffer_free_list_t *
639 free_list,
640 uword n_unaligned_buffers);
641
/** \brief Get the free list a buffer belongs to.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @param index - (vlib_buffer_free_list_index_t *) out: the buffer's
                   free list index
    @return - (vlib_buffer_free_list_t *) free list structure
*/
always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  vlib_buffer_free_list_index_t * index)
{
  vlib_buffer_free_list_index_t i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (vm->buffer_free_list_pool, i);
}
651
/** \brief Look up a free list by index.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param free_list_index - (vlib_buffer_free_list_index_t) list index
    @return - (vlib_buffer_free_list_t *) free list structure
*/
always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm,
			   vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}
665
/** \brief Get the data size of buffers on a free list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param index - (vlib_buffer_free_list_index_t) list index
    @return - (u32) per-buffer data size in bytes
*/
always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
				   vlib_buffer_free_list_index_t index)
{
  vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
  return f->n_data_bytes;
}
673
Ed Warnickecb9cada2015-12-08 15:45:58 -0700674/* Append given data to end of buffer, possibly allocating new buffers. */
675u32 vlib_buffer_add_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100676 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400677 u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700678
/* duplicate all buffers in chain */
/** \brief Deep-copy a buffer chain.

    Counts the segments, allocates an equal number of new buffers, and
    copies metadata + payload segment by segment. Only the
    NEXT_PRESENT and TOTAL_LENGTH_VALID flags are propagated.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) head of chain to copy
    @return - (vlib_buffer_t *) head of the new chain, or 0 if buffer
              allocation failed (nothing is leaked in that case)

    NOTE(review): new_buffers is a VLA sized by the chain length — a
    very long chain could overflow the stack; presumably chains are
    bounded in practice, but worth confirming. */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* walk the chain once to count segments */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment: copy full metadata (opaque fields, totals) + payload */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (s), s->current_length);

  /* next segments: link previous copy, then copy data + flags */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
736
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800737/** \brief Create a maximum of 256 clones of buffer and store them
738 in the supplied array
Damjan Marionc47ed032017-01-25 14:18:03 +0100739
740 @param vm - (vlib_main_t *) vlib main data structure pointer
741 @param src_buffer - (u32) source buffer index
742 @param buffers - (u32 * ) buffer index array
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800743 @param n_buffers - (u16) number of buffer clones requested (<=256)
Damjan Marionc47ed032017-01-25 14:18:03 +0100744 @param head_end_offset - (u16) offset relative to current position
745 where packet head ends
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800746 @return - (u16) number of buffers actually cloned, may be
Damjan Marionc47ed032017-01-25 14:18:03 +0100747 less than the number requested or zero
748*/
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800749always_inline u16
750vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
751 u16 n_buffers, u16 head_end_offset)
Damjan Marionc47ed032017-01-25 14:18:03 +0100752{
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800753 u16 i;
Damjan Marionc47ed032017-01-25 14:18:03 +0100754 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
755
756 ASSERT (s->n_add_refs == 0);
757 ASSERT (n_buffers);
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800758 ASSERT (n_buffers <= 256);
Damjan Marionc47ed032017-01-25 14:18:03 +0100759
760 if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
761 {
762 buffers[0] = src_buffer;
763 for (i = 1; i < n_buffers; i++)
764 {
765 vlib_buffer_t *d;
766 d = vlib_buffer_copy (vm, s);
767 if (d == 0)
768 return i;
769 buffers[i] = vlib_get_buffer_index (vm, d);
770
771 }
772 return n_buffers;
773 }
774
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800775 if (PREDICT_FALSE (n_buffers == 1))
Damjan Marionc47ed032017-01-25 14:18:03 +0100776 {
777 buffers[0] = src_buffer;
778 return 1;
779 }
780
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800781 n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
782 vlib_buffer_get_free_list_index
783 (s));
784
Damjan Marionc47ed032017-01-25 14:18:03 +0100785 for (i = 0; i < n_buffers; i++)
786 {
787 vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
788 d->current_data = s->current_data;
789 d->current_length = head_end_offset;
Damjan Marion072401e2017-07-13 18:53:27 +0200790 vlib_buffer_set_free_list_index (d,
791 vlib_buffer_get_free_list_index (s));
Yoann Desmouceaux1977a342018-05-29 13:38:44 +0200792
793 d->total_length_not_including_first_buffer = s->current_length -
Damjan Marionc47ed032017-01-25 14:18:03 +0100794 head_end_offset;
Yoann Desmouceaux1977a342018-05-29 13:38:44 +0200795 if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
796 {
797 d->total_length_not_including_first_buffer +=
798 s->total_length_not_including_first_buffer;
799 }
Damjan Marionc47ed032017-01-25 14:18:03 +0100800 d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
801 d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
Dave Barach178cf492018-11-13 16:34:13 -0500802 clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
803 clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
804 clib_memcpy_fast (vlib_buffer_get_current (d),
805 vlib_buffer_get_current (s), head_end_offset);
Damjan Marionc47ed032017-01-25 14:18:03 +0100806 d->next_buffer = src_buffer;
807 }
808 vlib_buffer_advance (s, head_end_offset);
809 s->n_add_refs = n_buffers - 1;
810 while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
811 {
812 s = vlib_get_buffer (vm, s->next_buffer);
813 s->n_add_refs = n_buffers - 1;
814 }
815
816 return n_buffers;
817}
818
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800819/** \brief Create multiple clones of buffer and store them
820 in the supplied array
821
822 @param vm - (vlib_main_t *) vlib main data structure pointer
823 @param src_buffer - (u32) source buffer index
824 @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested; requests
    larger than 256 are satisfied in 256-sized chunks
826 @param head_end_offset - (u16) offset relative to current position
827 where packet head ends
828 @return - (u16) number of buffers actually cloned, may be
829 less than the number requested or zero
830*/
831always_inline u16
832vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
833 u16 n_buffers, u16 head_end_offset)
834{
835 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
836 u16 n_cloned = 0;
837
838 while (n_buffers > 256)
839 {
840 vlib_buffer_t *copy;
841 copy = vlib_buffer_copy (vm, s);
842 n_cloned += vlib_buffer_clone_256 (vm,
843 vlib_get_buffer_index (vm, copy),
844 (buffers + n_cloned),
845 256, head_end_offset);
846 n_buffers -= 256;
847 }
848 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
849 buffers + n_cloned,
850 n_buffers, head_end_offset);
851
852 return n_cloned;
853}
854
Damjan Marionc47ed032017-01-25 14:18:03 +0100855/** \brief Attach cloned tail to the buffer
856
857 @param vm - (vlib_main_t *) vlib main data structure pointer
858 @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
860*/
861
862always_inline void
863vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
864 vlib_buffer_t * tail)
865{
866 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200867 ASSERT (vlib_buffer_get_free_list_index (head) ==
868 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100869
870 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
871 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
872 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
873 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
874 head->next_buffer = vlib_get_buffer_index (vm, tail);
875 head->total_length_not_including_first_buffer = tail->current_length +
876 tail->total_length_not_including_first_buffer;
877
878next_segment:
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000879 clib_atomic_add_fetch (&tail->n_add_refs, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +0100880
881 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
882 {
883 tail = vlib_get_buffer (vm, tail->next_buffer);
884 goto next_segment;
885 }
886}
887
Pierre Pfister328e99b2016-02-12 13:18:42 +0000888/* Initializes the buffer as an empty packet with no chained buffers. */
889always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400890vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000891{
892 first->total_length_not_including_first_buffer = 0;
893 first->current_length = 0;
894 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
895 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000896}
897
898/* The provided next_bi buffer index is appended to the end of the packet. */
899always_inline vlib_buffer_t *
Eyal Barib688fb12018-11-12 16:13:49 +0200900vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000901{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400902 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000903 last->next_buffer = next_bi;
904 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
905 next_buffer->current_length = 0;
906 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000907 return next_buffer;
908}
909
910/* Increases or decreases the packet length.
911 * It does not allocate or deallocate new buffers.
912 * Therefore, the added length must be compatible
913 * with the last buffer. */
914always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400915vlib_buffer_chain_increase_length (vlib_buffer_t * first,
916 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000917{
918 last->current_length += len;
919 if (first != last)
920 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000921}
922
923/* Copy data to the end of the packet and increases its length.
924 * It does not allocate new buffers.
925 * Returns the number of copied bytes. */
926always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400927vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100928 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400929 vlib_buffer_t * first,
930 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000931{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400932 u32 n_buffer_bytes =
933 vlib_buffer_free_list_buffer_size (vm, free_list_index);
934 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
935 u16 len = clib_min (data_len,
936 n_buffer_bytes - last->current_length -
937 last->current_data);
Dave Barach178cf492018-11-13 16:34:13 -0500938 clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
939 data, len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400940 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000941 return len;
942}
943
944/* Copy data to the end of the packet and increases its length.
945 * Allocates additional buffers from the free list if necessary.
946 * Returns the number of copied bytes.
947 * 'last' value is modified whenever new buffers are allocated and
948 * chained and points to the last buffer in the chain. */
949u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400950vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100951 vlib_buffer_free_list_index_t
952 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400953 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +0100954 vlib_buffer_t ** last, void *data,
955 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400956void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000957
Dave Barach9b8ffd92016-07-08 08:13:45 -0400958format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
959 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700960
/* State for stamping out packets from a fixed data template; see
   vlib_packet_template_init() / vlib_packet_template_get_packet(). */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Vector of buffer indices held by this template; presumably a cache
     refilled by vlib_packet_template_get_packet_helper() — confirm. */
  u32 *free_buffers;

  /* Template name; presumably built from the fmt/... arguments of
     vlib_packet_template_init() — confirm against that definition. */
  u8 *name;
} vlib_packet_template_t;
976
977void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
978 vlib_packet_template_t * t);
979
980void vlib_packet_template_init (vlib_main_t * vm,
981 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400982 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -0700983 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +0100984 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400985 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700986
Dave Barach9b8ffd92016-07-08 08:13:45 -0400987void *vlib_packet_template_get_packet (vlib_main_t * vm,
988 vlib_packet_template_t * t,
989 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700990
991always_inline void
992vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
993{
994 vec_free (t->packet_data);
995}
996
Ed Warnickecb9cada2015-12-08 15:45:58 -0700997/* Set a buffer quickly into "uninitialized" state. We want this to
998 be extremely cheap and arrange for all fields that need to be
999 initialized to be in the first 128 bits of the buffer. */
/* Reset 'dst' from the free list's buffer-init template with a single
   memcpy of the [template_start, template_end) region, plus the few
   fields that live outside that region. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  /* Bulk-copy the template region in one shot. */
  clib_memcpy_fast (STRUCT_MARK_PTR (dst, template_start),
		    STRUCT_MARK_PTR (src, template_start),
		    STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
		    STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not zeroed for some buffers. One option is to
   * uncomment the line lower (comes at a cost), the other, is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  /* A freshly initialized buffer must have no extra references. */
  ASSERT (dst->n_add_refs == 0);
}
1039
/* Return one buffer to free list 'f', re-initializing it from the
   list's template when 'do_init' is set.  When the per-list cache
   grows past 4 * VLIB_FRAME_SIZE entries, one VLIB_FRAME_SIZE batch of
   the oldest entries is flushed to the global buffer pool under the
   pool spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      /* the flushed batch is taken from index 0, i.e. the oldest entries */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1063
Ed Warnickecb9cada2015-12-08 15:45:58 -07001064#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001065extern u32 *vlib_buffer_state_validation_lock;
1066extern uword *vlib_buffer_state_validation_hash;
1067extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001068#endif
1069
/* Debug-only consistency check: verify that buffer 'b' is in the
   'expected' state (0 = free, nonzero = busy) according to the global
   validation hash.  Panics on mismatch.  Compiles to nothing unless
   CLIB_DEBUG > 0. */
static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  /* The validation hash lives on its own heap and is guarded by a
     spinlock. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      /* Freeze the circular journal before reporting. */
      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      /* Switch back to the caller's heap before formatting. */
      clib_mem_set_heap (oldheap);
      /* NOTE(review): five conversion specifiers but four arguments;
         the "(%d)" looks stale — verify against clib's format(). */
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  /* Publish hash updates before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1113
/* Debug-only: unconditionally record buffer 'b' as being in state
   'expected' (0 = free, nonzero = busy) in the validation hash.
   Compiles to nothing unless CLIB_DEBUG > 0. */
static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  /* The validation hash lives on its own heap, guarded by a spinlock. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  /* Publish the hash update before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1132
Klement Sekera75e7d132017-09-20 08:26:30 +02001133/** minimum data size of first buffer in a buffer chain */
1134#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
1135
1136/**
1137 * @brief compress buffer chain in a way where the first buffer is at least
1138 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
1139 *
1140 * @param[in] vm - vlib_main
1141 * @param[in,out] first - first buffer in chain
1142 * @param[in,out] discard_vector - vector of buffer indexes which were removed
1143 * from the chain
1144 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_index_t index;
  vlib_buffer_free_list_t *free_list =
    vlib_buffer_get_buffer_free_list (vm, first, &index);

  /* Never ask for more than the first buffer can actually hold. */
  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  do
    {
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      /* Pull bytes from the head of 'second' onto the tail of 'first'. */
      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
			first->current_length,
			vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      second->current_data += amount_to_copy;
      second->current_length -= amount_to_copy;
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  /* Bytes moved into 'first' no longer count toward the
	     "not including first buffer" total. */
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      if (!second->current_length)
	{
	  /* 'second' was drained: unlink it and queue its index for the
	     caller to free. */
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
1195
Eyal Barid3d42412018-11-05 13:29:25 +02001196/**
1197 * @brief linearize buffer chain - the first buffer is filled, if needed,
1198 * buffers are allocated and filled, returns free space in last buffer or
1199 * negative on failure
1200 *
1201 * @param[in] vm - vlib_main
1202 * @param[in,out] first - first buffer in chain
1203 */
always_inline int
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
{
  vlib_buffer_t *b = first;
  vlib_buffer_free_list_t *fl =
    vlib_buffer_get_free_list (vm, vlib_buffer_get_free_list_index (b));
  u32 buf_len = fl->n_data_bytes;
  // free buffer chain starting from the second buffer
  int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
  u32 chain_to_free = b->next_buffer;

  u32 len = vlib_buffer_length_in_chain (vm, b);
  u32 free_len = buf_len - b->current_data - b->current_length;
  int alloc_len = clib_max (len - free_len, 0);	//use the free len in the first buffer
  int n_buffers = (alloc_len + buf_len - 1) / buf_len;
  // NOTE(review): n_buffers may be 0, making this a zero-length VLA,
  // which is technically undefined — relies on a compiler extension.
  u32 new_buffers[n_buffers];

  u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
  if (n_alloc != n_buffers)
    {
      // partial allocation: release what we got and report failure
      vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
      return -1;
    }

  // walk the source segments, packing their data into 'b' and chaining
  // fresh buffers once 'b' fills up
  vlib_buffer_t *s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      int d_free_len = buf_len - b->current_data - b->current_length;
      ASSERT (d_free_len >= 0);
      // chain buf and split write
      u32 copy_len = clib_min (d_free_len, s->current_length);
      u8 *d = vlib_buffer_put_uninit (b, copy_len);
      clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
      int rest = s->current_length - copy_len;
      if (rest > 0)
	{
	  //prev buf is full
	  ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
	  ASSERT (n_buffers > 0);
	  b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
	  //make full use of the new buffers
	  b->current_data = 0;
	  d = vlib_buffer_put_uninit (b, rest);
	  clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
	}
    }
  // 'chain_to_free' is only meaningful when free_count == 1; with
  // free_count == 0 this call frees nothing
  vlib_buffer_free (vm, &chain_to_free, free_count);
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  if (b == first)		/* no buffers added */
    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  ASSERT (len == vlib_buffer_length_in_chain (vm, first));
  ASSERT (n_buffers == 0);	// every allocated buffer must be consumed
  // return remaining free space in the (new) last buffer
  return buf_len - b->current_data - b->current_length;
}
1259
Ed Warnickecb9cada2015-12-08 15:45:58 -07001260#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001261
1262/*
1263 * fd.io coding-style-patch-verification: ON
1264 *
1265 * Local Variables:
1266 * eval: (c-set-style "gnu")
1267 * End:
1268 */