/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}

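/* Illustrative sketch (not part of the API): the two translations above are
   inverses of each other, so a node can move freely between the 32-bit
   buffer index carried in frames and the buffer pointer it dereferences.
   bi0 is a hypothetical index taken from a frame.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     ASSERT (vlib_get_buffer_index (vm, b0) == bi0);
*/
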
/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

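/* Illustrative sketch (not part of the API): the total packet length is the
   first buffer's current_length plus, for chained packets, the cached
   total_length_not_including_first_buffer; the slow path walks the chain
   only when that cache is not valid.  bi0 is a hypothetical buffer index.

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     uword bytes_in_packet = vlib_buffer_length_in_chain (vm, b0);
     ASSERT (bytes_in_packet == vlib_buffer_index_length_in_chain (vm, bi0));
*/
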
/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}

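/* Illustrative sketch (not part of the API): flattening a possibly chained
   packet into contiguous memory, sizing the destination from the chain
   length first so that "must be large enough" holds.  bi0 is a hypothetical
   index of a non-empty packet; vec_validate/vec_free come from vppinfra.

     u8 *flat = 0;
     vec_validate (flat, vlib_buffer_index_length_in_chain (vm, bi0) - 1);
     uword copied = vlib_buffer_contents (vm, bi0, flat);
     // ... inspect the copied bytes ...
     vec_free (flat);
*/
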
/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  vlib_physmem_region_index_t pri;
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  pri = vm->buffer_main->buffer_pools[b->buffer_pool_index].physmem_region;
  return vlib_physmem_offset_to_physical (vm, pri,
					  (((uword) buffer_index) <<
					   CLIB_LOG2_CACHE_LINE_BYTES) +
					  STRUCT_OFFSET_OF (vlib_buffer_t,
							    data));
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)

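/* Illustrative sketch (not part of the API): a typical node loop prefetches
   the header of a buffer one iteration ahead of the one it is processing,
   hiding the metadata cache miss.  from/n_left are hypothetical
   frame-walking state; STORE follows the guidance in the comment above.

     while (n_left > 1)
       {
         vlib_prefetch_buffer_with_index (vm, from[1], STORE);
         vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
         // ... process b0 ...
         from += 1;
         n_left -= 1;
       }
*/
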
#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm,
			     u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}

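/* Illustrative sketch (debug usage, not part of the API): before handing an
   index to another component, debug code can cross-check the known-state
   hash.  bi0 is a hypothetical buffer index.

     ASSERT (vlib_buffer_is_known (vm, bi0) == VLIB_BUFFER_KNOWN_ALLOCATED);
*/
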
/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_cb);

  return bm->cb.vlib_buffer_alloc_cb (vm, buffers, n_buffers);
}

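/* Illustrative sketch (not part of the API): the return value must be
   checked, since fewer buffers than requested (or none) may be available;
   on a shortfall the ones that were allocated are given back.  N_BUFS is a
   hypothetical compile-time constant.

     u32 bis[N_BUFS];
     u32 n_got = vlib_buffer_alloc (vm, bis, N_BUFS);
     if (PREDICT_FALSE (n_got < N_BUFS))
       {
         if (n_got > 0)
           vlib_buffer_free (vm, bis, n_got);
         return 0; // or count a drop and retry later
       }
*/
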
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

always_inline u32
vlib_buffer_get_free_list_index (vlib_buffer_t * b)
{
  return b->flags & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

always_inline void
vlib_buffer_set_free_list_index (vlib_buffer_t * b, u32 index)
{
  /* if there is a need for more free lists we should consider
     storing data in the 2nd cacheline */
283 ASSERT (VLIB_BUFFER_FREE_LIST_INDEX_MASK & 1);
284 ASSERT (index <= VLIB_BUFFER_FREE_LIST_INDEX_MASK);
285
286 b->flags &= ~VLIB_BUFFER_FREE_LIST_INDEX_MASK;
287 b->flags |= index & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
288}
289
/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_from_free_list_cb);

  return bm->cb.vlib_buffer_alloc_from_free_list_cb (vm, buffers, n_buffers,
						     free_list_index);
}

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}

/* Add/delete buffer free lists. */
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
				  char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
					 char *fmt, ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
				   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_free_list_t *f =
    vlib_buffer_get_free_list (vm, free_list_index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  u32 free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}

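/* Illustrative sketch (not part of the API): vlib_buffer_copy() returns 0
   when the allocator comes up short, so the result must be checked before
   the duplicate chain is used.  b0 is a hypothetical source buffer.

     vlib_buffer_t *dup = vlib_buffer_copy (vm, b0);
     if (PREDICT_FALSE (dup == 0))
       return 0; // allocation failed, original chain is untouched
     u32 dup_bi = vlib_get_buffer_index (vm, dup);
*/
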
/** \brief Create multiple clones of buffer and store them in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u8) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u8) number of buffers actually cloned, may be
    less than the number requested or zero
*/

always_inline u8
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u8 n_buffers, u16 head_end_offset)
{
  u8 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));
  if (PREDICT_FALSE (n_buffers == 0))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));
      d->total_length_not_including_first_buffer =
	s->total_length_not_including_first_buffer + s->current_length -
	head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}

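/* Illustrative sketch (not part of the API): replicating a packet, e.g. for
   multicast.  Each clone gets a private copy of the first head_end bytes
   (the headers it may rewrite) and references the rest of the source chain.
   src_bi and head_end are hypothetical values; the return value can be
   smaller than requested and only that many entries of the array are valid.

     u32 clones[8];
     u8 n = vlib_buffer_clone (vm, src_bi, clones, 8, head_end);
     // enqueue clones[0] .. clones[n - 1]
*/
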
/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (vlib_buffer_get_free_list_index (head) ==
	  vlib_buffer_get_free_list_index (tail));

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  __sync_add_and_fetch (&tail->n_add_refs, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm,
			  vlib_buffer_t * first,
			  vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       u32 free_list_index,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes =
    vlib_buffer_free_list_buffer_size (vm, free_list_index);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
	       len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}

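/* Illustrative sketch (not part of the API): building a chained packet.
   first_bi, fl_index, payload and payload_len are hypothetical values; the
   _with_alloc variant declared below grows the chain on demand and keeps
   'last' pointing at the final buffer.

     vlib_buffer_t *first = vlib_get_buffer (vm, first_bi);
     vlib_buffer_t *last = first;
     vlib_buffer_chain_init (first);
     u16 copied = vlib_buffer_chain_append_data_with_alloc
       (vm, fl_index, first, &last, payload, payload_len);
*/
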
/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  u32 free_list_index,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last,
					  void *data, u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  u32 free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
					     vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_physmem_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
  /* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore,
   * so it may actually not be zeroed for some buffers. One option is to
   * uncomment the line below (comes at a cost); the other is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      vlib_buffer_free_list_t *mf;
      mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
      clib_spinlock_lock (&mf->global_buffers_lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&mf->global_buffers_lock);
    }
}

always_inline void
vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
				    vlib_buffer_t * dst1,
				    vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst0, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  clib_memcpy (STRUCT_MARK_PTR (dst1, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst0->n_add_refs = src->n_add_refs;
  dst1->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst0->f == src->f); ASSERT (dst1->f == src->f)
  _(current_data);
  _(current_length);
  _(flags);
#undef _

  ASSERT (dst0->total_length_not_including_first_buffer == 0);
  ASSERT (dst1->total_length_not_including_first_buffer == 0);
  ASSERT (dst0->n_add_refs == 0);
  ASSERT (dst1->n_add_refs == 0);
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */