/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
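
/* Usage sketch (illustrative only): buffer indices and buffer pointers are
   two views of the same cache-line-sized offset into buffer memory, so the
   two translations above invert each other.  Assumes 'vm' and a valid
   buffer index 'bi' obtained elsewhere.

     vlib_buffer_t *b = vlib_get_buffer (vm, bi);
     ASSERT (vlib_get_buffer_index (vm, b) == bi);
*/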

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword l = b->current_length + b->total_length_not_including_first_buffer;
  if (PREDICT_FALSE ((b->flags & (VLIB_BUFFER_NEXT_PRESENT
				  | VLIB_BUFFER_TOTAL_LENGTH_VALID))
		     == VLIB_BUFFER_NEXT_PRESENT))
    return vlib_buffer_length_in_chain_slow_path (vm, b);
  return l;
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
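
/* Usage sketch (illustrative only): 'contents' must be able to hold the
   whole chain, so size it from the chain length first.  'bi' is an assumed
   valid buffer index; clib_mem_alloc is shown as one possible way to get
   scratch memory.

     uword len = vlib_buffer_index_length_in_chain (vm, bi);
     u8 *copy = clib_mem_alloc (len);
     uword copied = vlib_buffer_contents (vm, bi, copy);
     ASSERT (copied == len);
*/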

/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  return vlib_physmem_offset_to_physical (&vm->physmem_main,
					  (((uword) buffer_index) <<
					   CLIB_LOG2_CACHE_LINE_BYTES) +
					  STRUCT_OFFSET_OF (vlib_buffer_t,
							    data));
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
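
/* Usage sketch (illustrative only): in a typical dispatch loop, prefetch
   the header of a buffer a couple of packets ahead of the one currently
   being processed; 'from' and 'n_left' are assumed to come from the
   node's frame.

     while (n_left > 2)
       {
         vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
         vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
         ... process b0 ...
         from += 1;
         n_left -= 1;
       }
*/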

#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)		\
do {								\
  vlib_main_t * _vmain = (vm);					\
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;		\
  hash_pair_t * _vbpair;					\
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({	\
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {	\
      (bi) = _vbpair->key;					\
      body;							\
    }								\
  }));								\
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  ASSERT (vlib_get_thread_index () == 0);

  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm,
			     u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  ASSERT (vlib_get_thread_index () == 0);
  hash_set (bm->buffer_known_hash, buffer_index, state);
}

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);

clib_error_t *vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
				       unsigned socket_id);

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_cb);

  return bm->cb.vlib_buffer_alloc_cb (vm, buffers, n_buffers);
}
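
/* Usage sketch (illustrative only): allocation may return fewer buffers
   than requested, so callers check the count and hand back any partial
   allocation they cannot use.  VLIB_FRAME_SIZE is just one plausible
   burst size.

     u32 bis[VLIB_FRAME_SIZE];
     u32 n = vlib_buffer_alloc (vm, bis, VLIB_FRAME_SIZE);
     if (PREDICT_FALSE (n < VLIB_FRAME_SIZE))
       {
         if (n)
           vlib_buffer_free (vm, bis, n);
         ... drop or retry ...
       }
*/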

always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_alloc_from_free_list_cb);

  return bm->cb.vlib_buffer_alloc_from_free_list_cb (vm, buffers, n_buffers,
						     free_list_index);
}

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}

/* Add/delete buffer free lists. */
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
				  char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
					 char *fmt, ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
				   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
					   vlib_buffer_free_list_t *
					   free_list,
					   uword n_unaligned_buffers);

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
				  u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = b->free_list_index;
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_free_list_t *f =
    vlib_buffer_get_free_list (vm, free_list_index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

always_inline void *
vlib_physmem_alloc_aligned (vlib_main_t * vm, clib_error_t ** error,
			    uword n_bytes, uword alignment)
{
  void *r =
    vm->os_physmem_alloc_aligned (&vm->physmem_main, n_bytes, alignment);
  if (!r)
    *error =
      clib_error_return (0, "failed to allocate %wd bytes of I/O memory",
			 n_bytes);
  else
    *error = 0;
  return r;
}

/* By default allocate I/O memory with cache line alignment. */
always_inline void *
vlib_physmem_alloc (vlib_main_t * vm, clib_error_t ** error, uword n_bytes)
{
  return vlib_physmem_alloc_aligned (vm, error, n_bytes,
				     CLIB_CACHE_LINE_BYTES);
}

always_inline void
vlib_physmem_free (vlib_main_t * vm, void *mem)
{
  return vm->os_physmem_free (mem);
}

always_inline u64
vlib_physmem_virtual_to_physical (vlib_main_t * vm, void *mem)
{
  vlib_physmem_main_t *pm = &vm->physmem_main;
  uword o = pointer_to_uword (mem) - pm->virtual.start;
  return vlib_physmem_offset_to_physical (pm, o);
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  u32 free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
	       vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
		   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
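
/* Usage sketch (illustrative only): vlib_buffer_copy returns 0 when enough
   buffers cannot be allocated, so the result must be checked before use.
   'b0' is an assumed existing buffer (head of a chain).

     vlib_buffer_t *c0 = vlib_buffer_copy (vm, b0);
     if (c0 == 0)
       ... drop: no copy was made ...
     else
       {
         u32 ci = vlib_get_buffer_index (vm, c0);
         ... enqueue ci independently of b0 ...
       }
*/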

/** \brief Create multiple clones of buffer and store them in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u8) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u8) number of buffers actually cloned, may be
    less than the number requested or zero
*/

always_inline u8
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u8 n_buffers, u16 head_end_offset)
{
  u8 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						s->free_list_index);
  if (PREDICT_FALSE (n_buffers == 0))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      d->free_list_index = s->free_list_index;
      d->total_length_not_including_first_buffer =
	s->total_length_not_including_first_buffer + s->current_length -
	head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
		   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
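
/* Usage sketch (illustrative only): replicate one packet towards several
   destinations.  Each clone gets a private copy of the first
   'head_end_offset' bytes (the headers the caller intends to rewrite) and
   shares the rest of the packet with the source via reference counting.
   The sizes below are assumptions for the example only.

     u32 clones[8];
     u16 rewrite_len = 64;
     u8 n = vlib_buffer_clone (vm, bi0, clones, 8, rewrite_len);
     ... n may be less than 8; the first rewrite_len bytes of each
         clones[i] may be modified independently ...
*/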

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

616always_inline void
617vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
618 vlib_buffer_t * tail)
619{
620 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
621 ASSERT (head->free_list_index == tail->free_list_index);
622
623 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
624 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
625 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
626 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
627 head->next_buffer = vlib_get_buffer_index (vm, tail);
628 head->total_length_not_including_first_buffer = tail->current_length +
629 tail->total_length_not_including_first_buffer;
630
631next_segment:
632 __sync_add_and_fetch (&tail->n_add_refs, 1);
633
634 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
635 {
636 tail = vlib_get_buffer (vm, tail->next_buffer);
637 goto next_segment;
638 }
639}
640
Pierre Pfister328e99b2016-02-12 13:18:42 +0000641/* Initializes the buffer as an empty packet with no chained buffers. */
642always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400643vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000644{
645 first->total_length_not_including_first_buffer = 0;
646 first->current_length = 0;
647 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
648 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000649}
650
651/* The provided next_bi buffer index is appended to the end of the packet. */
652always_inline vlib_buffer_t *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400653vlib_buffer_chain_buffer (vlib_main_t * vm,
654 vlib_buffer_t * first,
655 vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000656{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400657 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000658 last->next_buffer = next_bi;
659 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
660 next_buffer->current_length = 0;
661 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000662 return next_buffer;
663}
664
665/* Increases or decreases the packet length.
666 * It does not allocate or deallocate new buffers.
667 * Therefore, the added length must be compatible
668 * with the last buffer. */
669always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400670vlib_buffer_chain_increase_length (vlib_buffer_t * first,
671 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000672{
673 last->current_length += len;
674 if (first != last)
675 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000676}
677
678/* Copy data to the end of the packet and increases its length.
679 * It does not allocate new buffers.
680 * Returns the number of copied bytes. */
681always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400682vlib_buffer_chain_append_data (vlib_main_t * vm,
683 u32 free_list_index,
684 vlib_buffer_t * first,
685 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000686{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400687 u32 n_buffer_bytes =
688 vlib_buffer_free_list_buffer_size (vm, free_list_index);
689 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
690 u16 len = clib_min (data_len,
691 n_buffer_bytes - last->current_length -
692 last->current_data);
693 clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
694 len);
695 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000696 return len;
697}
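
/* Usage sketch (illustrative only): build up a packet in an existing
   buffer chain; 'first' is the head buffer and 'free_list_index' the list
   it came from.  Data that does not fit in 'last' is simply not copied;
   see the _with_alloc variant declared below for the allocating version.

     vlib_buffer_chain_init (first);
     vlib_buffer_t *last = first;
     u16 copied = vlib_buffer_chain_append_data (vm, free_list_index,
                                                 first, last, data, data_len);
     ... if copied < data_len, chain another buffer with
         vlib_buffer_chain_buffer() and append again ...
*/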

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					  u32 free_list_index,
					  vlib_buffer_t * first,
					  vlib_buffer_t ** last,
					  void *data, u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  u32 free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
					     vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_physmem_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);
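
/* Usage sketch (illustrative only, all names and sizes are hypothetical):
   initialize a template once with pre-built packet data, then stamp out
   packets from it on demand.  The fifth argument is
   min_n_buffers_each_physmem_alloc.

     static vlib_packet_template_t tpl;
     u8 hdr[42];
     ... fill in hdr ...
     vlib_packet_template_init (vm, &tpl, hdr, sizeof (hdr), 16,
                                "example-template");
     ...
     u32 bi;
     void *p = vlib_packet_template_get_packet (vm, &tpl, &bi);
*/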

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
  /* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == fl->buffer_init_template.free_list_index);

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
  _(free_list_index);
#undef _
  ASSERT (dst->total_length_not_including_first_buffer == 0);
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
}

always_inline void
vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
				    vlib_buffer_t * dst1,
				    vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == fl->buffer_init_template.free_list_index);

  clib_memcpy (STRUCT_MARK_PTR (dst0, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  clib_memcpy (STRUCT_MARK_PTR (dst1, template_start),
	       STRUCT_MARK_PTR (src, template_start),
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
	       STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst0->n_add_refs = src->n_add_refs;
  dst1->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst0->f == src->f);  ASSERT( dst1->f == src->f)
  _(current_data);
  _(current_length);
  _(flags);
  _(free_list_index);
#undef _

  ASSERT (dst0->total_length_not_including_first_buffer == 0);
  ASSERT (dst1->total_length_not_including_first_buffer == 0);
  ASSERT (dst0->n_add_refs == 0);
  ASSERT (dst1->n_add_refs == 0);
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx: %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */