/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
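
/* Usage sketch (illustrative, not part of the original header): the two
   helpers above are inverses of each other, so a node handler that receives
   buffer indices in a frame can hop between index and pointer form cheaply.
   The 'from' vector below is a hypothetical frame of buffer indices.

     u32 bi = from[0];                              // index taken from a frame
     vlib_buffer_t *b = vlib_get_buffer (vm, bi);   // index -> pointer
     ASSERT (vlib_get_buffer_index (vm, b) == bi);  // pointer -> index round-trips
*/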

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}
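
/* Illustrative sketch (bi0 is a hypothetical buffer index): the fast paths
   above rely on the VLIB_BUFFER_NEXT_PRESENT and VLIB_BUFFER_TOTAL_LENGTH_VALID
   flags, so a single-segment buffer costs one field read, a chain with a
   valid cached total costs two, and only an invalidated chain takes the
   slow-path walk.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     uword bytes = vlib_buffer_length_in_chain (vm, b0);
     // for a single-segment buffer, bytes == b0->current_length
*/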

/** \brief Get length in bytes of the buffer chain for a given buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
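
/* Illustrative sketch (bi is a hypothetical buffer index; the destination
   sizing is the caller's responsibility, as the doc comment above notes):
   linearize a possibly chained packet into a flat scratch area before
   parsing it.

     u8 *flat = 0;
     vec_validate (flat, vlib_buffer_index_length_in_chain (vm, bi) - 1);
     uword copied = vlib_buffer_contents (vm, bi, flat);
     // ... parse flat[0 .. copied-1] ...
     vec_free (flat);
*/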

/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
					       b->buffer_pool_index);

  return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
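
/* Illustrative sketch (a hypothetical dispatch loop; 'from' and 'n_left'
   are assumptions): prefetch the header of the buffer two slots ahead while
   working on the current one, which is how this macro is typically used
   inside node functions.

     while (n_left > 2)
       {
	 vlib_prefetch_buffer_with_index (vm, from[2], STORE);
	 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
	 // ... process b0 ...
	 from += 1;
	 n_left -= 1;
       }
*/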

#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
				      uword n_buffers,
				      vlib_buffer_known_state_t
				      expected_state);

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm,
			     u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

always_inline u32
vlib_buffer_get_free_list_index (vlib_buffer_t * b)
{
  return b->flags & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

always_inline void
vlib_buffer_set_free_list_index (vlib_buffer_t * b, u32 index)
{
  /* if there is a need for more free lists we should consider
     storing data in the 2nd cacheline */
  ASSERT (VLIB_BUFFER_FREE_LIST_INDEX_MASK & 1);
  ASSERT (index <= VLIB_BUFFER_FREE_LIST_INDEX_MASK);

  b->flags &= ~VLIB_BUFFER_FREE_LIST_INDEX_MASK;
  b->flags |= index & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param free_list_index - (u32) free list from which to allocate
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      len = vec_len (fl->buffers);

      /* even if the fill callback didn't manage to refill the free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
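
/* Illustrative sketch (sizes are arbitrary): allocate a burst of buffers,
   check the actual count, and give back anything that goes unused. The
   check matters because the allocator may return fewer indices than asked.

     u32 bis[32];
     u32 n = vlib_buffer_alloc (vm, bis, 32);
     if (n < 32)
       {
	 // handle the shortfall; here we simply release what we got
	 if (n)
	   vlib_buffer_free (vm, bis, n);
       }
*/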

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}

/* Add/delete buffer free lists. */
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
				  char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
					 char *fmt, ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
				   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_free_list_t *f =
    vlib_buffer_get_free_list (vm, free_list_index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  u32 free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
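
/* Illustrative sketch (bi0 is a hypothetical buffer index): make a private,
   writable duplicate of a received packet chain before modifying it, leaving
   the original untouched. The NULL check matters because the copy fails when
   buffers run out.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     vlib_buffer_t *c0 = vlib_buffer_copy (vm, b0);
     if (c0)
       {
	 u32 ci0 = vlib_get_buffer_index (vm, c0);
	 // ... enqueue ci0 somewhere; the original bi0 stays intact ...
       }
*/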

/** \brief Create multiple clones of buffer and store them in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u8) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u8) number of buffers actually cloned, may be
    less than the number requested or zero
*/

always_inline u8
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u8 n_buffers, u16 head_end_offset)
{
  u8 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));
  if (PREDICT_FALSE (n_buffers == 0))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));
      d->total_length_not_including_first_buffer =
	s->total_length_not_including_first_buffer + s->current_length -
	head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
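
/* Illustrative sketch (a hypothetical replication path, e.g. multicast;
   bi0 and the sizes are assumptions): make several clones that share the
   packet body but own a private copy of the first head_end_offset bytes,
   so each copy can carry its own headers.

     u32 clones[4];
     u8 n = vlib_buffer_clone (vm, bi0, clones, 4, 64);  // 64 = head_end_offset
     // n may be less than 4; clones[0..n-1] are valid buffer indices
*/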

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (vlib_buffer_get_free_list_index (head) ==
	  vlib_buffer_get_free_list_index (tail));

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  __sync_add_and_fetch (&tail->n_add_refs, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm,
			  vlib_buffer_t * first,
			  vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       u32 free_list_index,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes =
    vlib_buffer_free_list_buffer_size (vm, free_list_index);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
	       len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
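
/* Illustrative sketch (bi0, payload and len are hypothetical): build a
   packet by appending data to a freshly initialized head buffer, falling
   back to the allocating variant declared below when the current segment
   runs out of room.

     vlib_buffer_t *first = vlib_get_buffer (vm, bi0);
     vlib_buffer_t *last = first;
     vlib_buffer_chain_init (first);
     u16 copied = vlib_buffer_chain_append_data
       (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX, first, last, payload, len);
     // if copied < len, use vlib_buffer_chain_append_data_with_alloc ()
*/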

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  u32 free_list_index,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last,
					  void *data, u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  u32 free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
					     vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_physmem_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
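
/* Illustrative sketch (the template variable and the pre-built 'hdr' blob
   are hypothetical): a packet template is initialized once with boilerplate
   packet data and then stamped out cheaply whenever a copy is needed.

     static vlib_packet_template_t tmpl;
     vlib_packet_template_init (vm, &tmpl, hdr, sizeof (hdr),
				16, "example template");  // 16 = min buffers per physmem alloc
     u32 bi;
     void *p = vlib_packet_template_get_packet (vm, &tmpl, &bi);
     // p points at a fresh copy of hdr inside buffer bi
*/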

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state.  We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not be zeroed for some buffers. One option is to
   * uncomment the line below (comes at a cost), the other is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      vlib_buffer_free_list_t *mf;
      mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
      clib_spinlock_lock (&mf->global_buffers_lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&mf->global_buffers_lock);
    }
}

always_inline void
vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
				    vlib_buffer_t * dst1,
				    vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst0, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  clib_memcpy (STRUCT_MARK_PTR (dst1, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst0->n_add_refs = src->n_add_refs;
  dst1->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst0->f == src->f); ASSERT (dst1->f == src->f)
  _(current_data);
  _(current_length);
  _(flags);
#undef _

  ASSERT (dst0->total_length_not_including_first_buffer == 0);
  ASSERT (dst1->total_length_not_including_first_buffer == 0);
  ASSERT (dst0->n_add_refs == 0);
  ASSERT (dst1->n_add_refs == 0);
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */