/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
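
/* Illustrative usage sketch, not part of the API: buffer indices and
   buffer pointers convert back and forth through the two functions
   above.  `vm` and `bi` are assumed to be a valid vlib_main_t pointer
   and a buffer index owned by the caller.

     vlib_buffer_t *b = vlib_get_buffer (vm, bi);
     ASSERT (vlib_get_buffer_index (vm, b) == bi);
*/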

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
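
/* Illustrative sketch, not part of the API: the destination must be
   large enough for the whole chain, so it can be sized from
   vlib_buffer_length_in_chain() first.  `vm` and `bi` are assumed to
   be a valid vlib_main_t pointer and buffer index.

     vlib_buffer_t *b = vlib_get_buffer (vm, bi);
     u8 *copy = 0;
     vec_validate (copy, vlib_buffer_length_in_chain (vm, b) - 1);
     vlib_buffer_contents (vm, bi, copy);
*/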

/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  return vlib_physmem_offset_to_physical (&vm->physmem_main,
					  (((uword) buffer_index) <<
					   CLIB_LOG2_CACHE_LINE_BYTES) +
					  STRUCT_OFFSET_OF (vlib_buffer_t,
							    data));
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
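
/* Illustrative sketch, not part of the API: a common pattern is to
   prefetch the header of the next buffer while processing the current
   one, so its metadata is already in cache on the next iteration.
   `vm`, `from` and `n_left` are assumed to come from the calling node.

     while (n_left > 0)
       {
	 if (n_left > 1)
	   vlib_prefetch_buffer_with_index (vm, from[1], STORE);
	 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
	 ... process b0 here ...
	 from += 1;
	 n_left -= 1;
       }
*/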

#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm,
			     u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_cb);

  return bm->cb.vlib_buffer_alloc_cb (vm, buffers, n_buffers);
}
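
/* Illustrative sketch, not part of the API: allocation can return fewer
   buffers than requested, so the return value must be checked and any
   partial batch either used or given back (vlib_buffer_free() is
   declared further below).  `vm` is assumed valid; the error handling
   shown is hypothetical and up to the caller.

     u32 bi[32];
     u32 n = vlib_buffer_alloc (vm, bi, 32);
     if (n < 32)
       {
	 if (n)
	   vlib_buffer_free (vm, bi, n);
	 ... signal an allocation error to the caller ...
       }
*/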

always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

always_inline u32
vlib_buffer_get_free_list_index (vlib_buffer_t * b)
{
  return b->flags & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

always_inline void
vlib_buffer_set_free_list_index (vlib_buffer_t * b, u32 index)
{
  /* if there is a need for more free lists we should consider
     storing data in the 2nd cacheline */
  ASSERT (VLIB_BUFFER_FREE_LIST_INDEX_MASK & 1);
  ASSERT (index <= VLIB_BUFFER_FREE_LIST_INDEX_MASK);

  b->flags &= ~VLIB_BUFFER_FREE_LIST_INDEX_MASK;
  b->flags |= index & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_from_free_list_cb);

  return bm->cb.vlib_buffer_alloc_from_free_list_cb (vm, buffers, n_buffers,
						     free_list_index);
}

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}

/* Add/delete buffer free lists. */
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
				  char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
					 char *fmt, ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
				   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_free_list_t *f =
    vlib_buffer_get_free_list (vm, free_list_index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

always_inline void *
vlib_physmem_alloc_aligned (vlib_main_t * vm, clib_error_t ** error,
			    uword n_bytes, uword alignment)
{
  void *r =
    vm->os_physmem_alloc_aligned (&vm->physmem_main, n_bytes, alignment);
  if (!r)
    *error =
      clib_error_return (0, "failed to allocate %wd bytes of I/O memory",
			 n_bytes);
  else
    *error = 0;
  return r;
}

/* By default allocate I/O memory with cache line alignment. */
always_inline void *
vlib_physmem_alloc (vlib_main_t * vm, clib_error_t ** error, uword n_bytes)
{
  return vlib_physmem_alloc_aligned (vm, error, n_bytes,
				     CLIB_CACHE_LINE_BYTES);
}

always_inline void
vlib_physmem_free (vlib_main_t * vm, void *mem)
{
  return vm->os_physmem_free (mem);
}

always_inline u64
vlib_physmem_virtual_to_physical (vlib_main_t * vm, void *mem)
{
  vlib_physmem_main_t *pm = &vm->physmem_main;
  uword o = pointer_to_uword (mem) - pm->virtual.start;
  return vlib_physmem_offset_to_physical (pm, o);
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  u32 free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
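
/* Illustrative sketch, not part of the API: vlib_buffer_copy() returns a
   pointer to the first buffer of the new chain, or 0 when allocation
   fails, so the result must be checked before use.  `vm` and `b` are
   assumed valid; what to do on failure is the caller's choice.

     vlib_buffer_t *c = vlib_buffer_copy (vm, b);
     if (c == 0)
       ... handle the allocation failure ...
     u32 ci = vlib_get_buffer_index (vm, c);
*/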

/** \brief Create multiple clones of buffer and store them in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u8) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u8) number of buffers actually cloned, may be
    less than the number requested or zero
*/

always_inline u8
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u8 n_buffers, u16 head_end_offset)
{
  u8 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));
  if (PREDICT_FALSE (n_buffers == 0))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));
      d->total_length_not_including_first_buffer =
	s->total_length_not_including_first_buffer + s->current_length -
	head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
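
/* Illustrative sketch, not part of the API: clones share the tail of the
   source buffer and each get a private copy only of the first
   head_end_offset bytes (typically the rewritable headers).  The return
   value may be smaller than requested.  `vm`, `bi`, N_CLONES and the
   chosen head_end_offset are assumptions for the example.

     u32 clones[N_CLONES];
     u16 head_end_offset = 64;
     u8 n = vlib_buffer_clone (vm, bi, clones, N_CLONES, head_end_offset);
     for (u8 i = 0; i < n; i++)
       {
	 vlib_buffer_t *c = vlib_get_buffer (vm, clones[i]);
	 ... rewrite the private head of c; the payload is shared ...
       }
*/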

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (vlib_buffer_get_free_list_index (head) ==
	  vlib_buffer_get_free_list_index (tail));

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  __sync_add_and_fetch (&tail->n_add_refs, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm,
			  vlib_buffer_t * first,
			  vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       u32 free_list_index,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes =
    vlib_buffer_free_list_buffer_size (vm, free_list_index);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
	       len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
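
/* Illustrative sketch, not part of the API: building a multi-buffer
   packet with the chain helpers above.  `vm`, `free_list_index`,
   `bi0`/`bi1` (freshly allocated buffer indices), `data` and `len` are
   assumptions for the example.

     vlib_buffer_t *first = vlib_get_buffer (vm, bi0);
     vlib_buffer_t *last = first;
     vlib_buffer_chain_init (first);
     u16 copied = vlib_buffer_chain_append_data (vm, free_list_index,
						 first, last, data, len);
     if (copied < len)
       last = vlib_buffer_chain_buffer (vm, first, last, bi1);
     ... append the remainder into `last`, or use
	 vlib_buffer_chain_append_data_with_alloc() (declared below)
	 to let it allocate buffers as needed ...
*/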

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  u32 free_list_index,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last,
					  void *data, u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  u32 free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
					     vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_physmem_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
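
/* Illustrative sketch, not part of the API: a packet template is
   initialized once with a prebuilt packet image and then stamped out
   repeatedly.  `vm`, `my_template_data`, its size, the batch size of
   256 and the template name are assumptions for the example.

     static vlib_packet_template_t t;
     vlib_packet_template_init (vm, &t, my_template_data,
				sizeof (my_template_data),
				256, "my packet template");
     u32 bi;
     void *p = vlib_packet_template_get_packet (vm, &t, &bi);
*/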

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not be zeroed for some buffers. One option is to
   * uncomment the line below (comes at a cost); the other is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      vlib_buffer_free_list_t *mf;
      mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
      clib_spinlock_lock (&mf->global_buffers_lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      clib_spinlock_unlock (&mf->global_buffers_lock);
    }
}

always_inline void
vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
				    vlib_buffer_t * dst1,
				    vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst0, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  clib_memcpy (STRUCT_MARK_PTR (dst1, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst0->n_add_refs = src->n_add_refs;
  dst1->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst0->f == src->f); ASSERT( dst1->f == src->f)
  _(current_data);
  _(current_length);
  _(flags);
#undef _

  ASSERT (dst0->total_length_not_including_first_buffer == 0);
  ASSERT (dst1->total_length_not_including_first_buffer == 0);
  ASSERT (dst0->n_add_refs == 0);
  ASSERT (dst1->n_add_refs == 0);
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */