/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
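
/* Illustrative usage sketch (not part of the original header): the two
   translations above are exact inverses, so an index -> pointer -> index
   round trip returns the original index.  'bi' is assumed to come from a
   prior allocation. */
always_inline void
example_buffer_index_round_trip (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);	/* index -> pointer */
  u32 bi2 = vlib_get_buffer_index (vm, b);	/* pointer -> index */
  ASSERT (bi2 == bi);
}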

/** \brief Get next buffer in buffer linked list, or NULL at end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}
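
/* Illustrative sketch (assumption: 'b' is the head of a possibly chained
   packet): walking a chain with vlib_get_next_buffer () and summing segment
   lengths yields the same value as vlib_buffer_length_in_chain () below. */
always_inline uword
example_buffer_chain_walk_length (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = 0;
  while (b)
    {
      len += b->current_length;
      b = vlib_get_next_buffer (vm, b);
    }
  return len;
}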

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
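
/* Illustrative sketch (assumption: the caller supplies a destination sized
   from vlib_buffer_length_in_chain ()): linearize a possibly chained packet
   into contiguous memory with vlib_buffer_contents (). */
always_inline uword
example_buffer_linearize (vlib_main_t * vm, u32 bi, u8 * flat, uword flat_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);

  if (vlib_buffer_length_in_chain (vm, b) > flat_len)
    return 0;			/* destination too small, nothing copied */
  return vlib_buffer_contents (vm, bi, flat);	/* returns bytes copied */
}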

/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
					       b->buffer_pool_index);

  return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
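
/* Illustrative sketch (assumption: 'buffers' / 'n_buffers' come from a node
   frame): prefetch the header of the next buffer while working on the
   current one, the usual pattern behind vlib_prefetch_buffer_with_index (). */
always_inline void
example_prefetch_and_count_bytes (vlib_main_t * vm, u32 * buffers,
				  u32 n_buffers, uword * total_bytes)
{
  u32 i;
  for (i = 0; i < n_buffers; i++)
    {
      if (i + 1 < n_buffers)
	vlib_prefetch_buffer_with_index (vm, buffers[i + 1], LOAD);
      *total_bytes +=
	vlib_buffer_length_in_chain (vm, vlib_get_buffer (vm, buffers[i]));
    }
}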

#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (u32 buffer_index)
{
  vlib_buffer_main_t *bm = vlib_global_main.buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}
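
/* Illustrative sketch: the known-state hash is a debug aid, so a typical use
   is a sanity check that an index handed to this layer is really allocated. */
always_inline void
example_assert_buffer_allocated (u32 buffer_index)
{
  ASSERT (vlib_buffer_is_known (buffer_index) == VLIB_BUFFER_KNOWN_ALLOCATED);
}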

always_inline void
vlib_buffer_set_known_state (u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vlib_global_main.buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

always_inline vlib_buffer_free_list_index_t
vlib_buffer_get_free_list_index (vlib_buffer_t * b)
{
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
    return b->free_list_index;

  return 0;
}

always_inline void
vlib_buffer_set_free_list_index (vlib_buffer_t * b,
				 vlib_buffer_free_list_index_t index)
{
  if (PREDICT_FALSE (index))
    {
      b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
      b->free_list_index = index;
    }
  else
    b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param index - (vlib_buffer_free_list_index_t) free list to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (bm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      len = vec_len (fl->buffers);

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}

/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param ring - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
			   u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}
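
/* Illustrative sketch (assumption: a driver-style rx ring of 'ring_size'
   slots refilled at 'tail', with n_free_slots <= ring_size):
   vlib_buffer_alloc_to_ring () handles the wrap-around at the end of the
   ring internally. */
always_inline u32
example_refill_rx_ring (vlib_main_t * vm, u32 * ring, u32 ring_size,
			u32 tail, u32 n_free_slots)
{
  /* returns how many slots were actually filled; the caller advances its
     tail index by this amount */
  return vlib_buffer_alloc_to_ring (vm, ring, tail % ring_size, ring_size,
				    n_free_slots);
}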

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
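
/* Illustrative sketch: the usual allocate / use / release cycle.  Allocation
   may return fewer buffers than requested, so only the indices actually
   returned are used and later freed. */
always_inline void
example_alloc_touch_free (vlib_main_t * vm)
{
  u32 buffers[32];
  u32 i, n_alloc = vlib_buffer_alloc (vm, buffers, 32);

  for (i = 0; i < n_alloc; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, buffers[i]);
      b->current_length = 0;	/* start with an empty payload */
    }

  /* frees entire chains; here each buffer is a single segment */
  vlib_buffer_free (vm, buffers, n_alloc);
}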

/* Add/delete buffer free lists. */
vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
							     u32 n_data_bytes,
							     char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
vlib_buffer_free_list_index_t vlib_buffer_get_or_create_free_list (vlib_main_t
								    * vm,
								    u32
								    n_data_bytes,
								    char *fmt,
								    ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
				   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

always_inline vlib_buffer_free_list_index_t
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  vlib_buffer_free_list_index_t * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_index_t i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm,
			   vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
				   vlib_buffer_free_list_index_t index)
{
  vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  vlib_buffer_free_list_index_t free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
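
/* Illustrative sketch: vlib_buffer_copy () deep-copies a whole chain and
   returns NULL when buffers cannot be allocated, so the result must be
   checked before use. */
always_inline u32
example_duplicate_packet (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_t *copy = vlib_buffer_copy (vm, b);

  if (copy == 0)
    return ~0;			/* allocation failure, nothing was copied */

  return vlib_get_buffer_index (vm, copy);
}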

/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  if (PREDICT_FALSE (n_buffers == 1))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));
      d->total_length_not_including_first_buffer =
	s->total_length_not_including_first_buffer + s->current_length -
	head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}

/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u16 n_buffers, u16 head_end_offset)
{
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
  u16 n_cloned = 0;

  while (n_buffers > 256)
    {
      vlib_buffer_t *copy;
      copy = vlib_buffer_copy (vm, s);
      n_cloned += vlib_buffer_clone_256 (vm,
					 vlib_get_buffer_index (vm, copy),
					 (buffers + n_cloned),
					 256, head_end_offset);
      n_buffers -= 256;
    }
  n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
				     buffers + n_cloned,
				     n_buffers, head_end_offset);

  return n_cloned;
}
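
/* Illustrative sketch (assumption: 'n_copies' replicas are needed, e.g. for
   packet replication): each clone shares the original payload segments and
   gets a private copy of the first 'header_bytes' so per-copy header
   rewrites do not disturb the shared tail. */
always_inline u16
example_replicate_packet (vlib_main_t * vm, u32 src_bi, u32 * clones,
			  u16 n_copies, u16 header_bytes)
{
  u16 n = vlib_buffer_clone (vm, src_bi, clones, n_copies, header_bytes);

  /* 'n' may be smaller than requested if buffer allocation ran short */
  return n;
}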

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (vlib_buffer_get_free_list_index (head) ==
	  vlib_buffer_get_free_list_index (tail));

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  __sync_add_and_fetch (&tail->n_add_refs, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm,
			  vlib_buffer_t * first,
			  vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       vlib_buffer_free_list_index_t free_list_index,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes =
    vlib_buffer_free_list_buffer_size (vm, free_list_index);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
	       len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
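
/* Illustrative sketch (assumption: the default free list is used and 'data'
   fits in the head buffer): build a one-segment packet by allocating a
   buffer, initializing the chain and appending payload. */
always_inline u32
example_build_packet (vlib_main_t * vm, void *data, u16 data_len)
{
  u32 bi;
  vlib_buffer_t *b;

  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
    return ~0;			/* allocation failure */

  b = vlib_get_buffer (vm, bi);
  vlib_buffer_chain_init (b);
  vlib_buffer_chain_append_data (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX,
				 b, b, data, data_len);
  return bi;
}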

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  vlib_buffer_free_list_index_t
					  free_list_index,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last, void *data,
					  u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
					     vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_physmem_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not be zeroed for some buffers. One option is to
   * uncomment the line below (comes at a cost), the other is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      vlib_buffer_free_list_t *mf;
      mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
      clib_spinlock_lock (&mf->global_buffers_lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&mf->global_buffers_lock);
    }
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

/** minimum data size of first buffer in a buffer chain */
#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)

/**
 * @brief compress buffer chain in a way where the first buffer is at least
 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 * @param[in,out] discard_vector - vector of buffer indexes which were removed
 * from the chain
 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_index_t index;
  vlib_buffer_free_list_t *free_list =
    vlib_buffer_get_buffer_free_list (vm, first, &index);

  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  do
    {
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      clib_memcpy (((u8 *) vlib_buffer_get_current (first)) +
		   first->current_length,
		   vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      vlib_buffer_advance (second, amount_to_copy);
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      if (!second->current_length)
	{
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
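
/* Illustrative sketch: after compressing a chain, the indexes collected in
   the discard vector still have to be freed by the caller. */
always_inline void
example_compress_and_discard (vlib_main_t * vm, vlib_buffer_t * first)
{
  u32 *discard = 0;		/* vppinfra vector, starts empty */

  vlib_buffer_chain_compress (vm, first, &discard);

  if (vec_len (discard))
    vlib_buffer_free (vm, discard, vec_len (discard));
  vec_free (discard);
}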

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */