/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>

/** \file
    vlib buffer access methods.
*/


/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  ASSERT (offset < bm->buffer_mem_size);

  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/

always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
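
/* Usage sketch (illustrative, not part of the API): translating between a
 * buffer index and its pointer is pure pointer arithmetic on the buffer
 * memory region. Assumes 'vm' is the current vlib_main_t and 'bi' is a
 * valid, allocated buffer index.
 *
 *   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
 *   ASSERT (vlib_get_buffer_index (vm, b) == bi);
 */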

/** \brief Get next buffer in the buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
          ? vlib_get_buffer (vm, b->next_buffer) : 0);
}
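
/* Usage sketch (illustrative): walk every segment of a possibly chained
 * packet. Assumes 'b0' points at the head vlib_buffer_t of a valid chain.
 *
 *   vlib_buffer_t *seg = b0;
 *   uword n_bytes = 0;
 *   while (seg)
 *     {
 *       n_bytes += seg->current_length;
 *       seg = vlib_get_next_buffer (vm, seg);
 *     }
 */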

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
                                             vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, <strong>must be large enough</strong>
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}
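
/* Usage sketch (illustrative): linearize a chained packet into flat memory.
 * The destination must be sized by the caller; here the chain length is
 * queried first. Assumes 'bi' is a valid buffer index heading the chain.
 *
 *   uword n = vlib_buffer_index_length_in_chain (vm, bi);
 *   u8 *flat = clib_mem_alloc (n);
 *   uword copied = vlib_buffer_contents (vm, bi, flat);
 *   ASSERT (copied == n);
 */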

/* Return physical address of buffer->data start. */
always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  vlib_buffer_pool_t *pool = vec_elt_at_index (bm->buffer_pools,
                                               b->buffer_pool_index);

  return vlib_physmem_virtual_to_physical (vm, pool->physmem_region, b->data);
}

/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of the buffer contain most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)     \
  do {                                                  \
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);      \
    vlib_prefetch_buffer_header (_b, type);             \
  } while (0)
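
/* Usage sketch (illustrative): a common pattern in graph nodes is to
 * prefetch the header of buffer i + 2 while processing buffer i. Assumes
 * 'from' is a vector of buffer indices and 'n_left' its remaining length.
 *
 *   while (n_left > 2)
 *     {
 *       vlib_prefetch_buffer_with_index (vm, from[2], STORE);
 *       vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
 *       // ... process b0 ...
 *       from += 1;
 *       n_left -= 1;
 *     }
 */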

#if 0
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
 */
#define vlib_buffer_foreach_allocated(vm,bi,body)                \
do {                                                             \
  vlib_main_t * _vmain = (vm);                                   \
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;            \
  hash_pair_t * _vbpair;                                         \
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({       \
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {      \
      (bi) = _vbpair->key;                                       \
      body;                                                      \
    }                                                            \
  }));                                                           \
} while (0)
#endif

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;

void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
                                      uword n_buffers,
                                      vlib_buffer_known_state_t
                                      expected_state);

always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

always_inline void
vlib_buffer_set_known_state (vlib_main_t * vm,
                             u32 buffer_index,
                             vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
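
/* Debug sketch (illustrative): the known-state hash lets debug code check
 * that a buffer index it is about to hand off is still believed to be
 * allocated. Assumes a CLIB_DEBUG build where the hash is maintained.
 *
 *   ASSERT (vlib_buffer_is_known (vm, bi) == VLIB_BUFFER_KNOWN_ALLOCATED);
 */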

/* Validates sanity of a single buffer.
   Returns format'ed vector with error message if any. */
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
                          uword follow_chain);

always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

always_inline u32
vlib_buffer_get_free_list_index (vlib_buffer_t * b)
{
  return b->flags & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

always_inline void
vlib_buffer_set_free_list_index (vlib_buffer_t * b, u32 index)
{
  /* if there is a need for more free lists we should consider
     storing data in the 2nd cacheline */
  ASSERT (VLIB_BUFFER_FREE_LIST_INDEX_MASK & 1);
  ASSERT (index <= VLIB_BUFFER_FREE_LIST_INDEX_MASK);

  b->flags &= ~VLIB_BUFFER_FREE_LIST_INDEX_MASK;
  b->flags |= index & VLIB_BUFFER_FREE_LIST_INDEX_MASK;
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param free_list_index - (u32) index of the free list to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
                                  u32 * buffers,
                                  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  len = vec_len (fl->buffers);

  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      len = vec_len (fl->buffers);

      /* even if the fill callback didn't manage to fully refill the free
         list we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* the following code is intentionally duplicated to allow the compiler
         to optimize the fast path when n_buffers is a constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  src = fl->buffers + len - n_buffers;
  clib_memcpy (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
                                           VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
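
/* Usage sketch (illustrative): allocation may return fewer buffers than
 * requested, so callers check the count and free any partial result they
 * cannot use. Assumes 'vm' is the current vlib_main_t.
 *
 *   u32 bis[32];
 *   u32 n = vlib_buffer_alloc (vm, bis, 32);
 *   if (n < 32)
 *     {
 *       vlib_buffer_free (vm, bis, n);   // give back the partial batch
 *       return 0;                        // or otherwise handle the shortfall
 *     }
 */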

/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
                           u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}
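
/* Usage sketch (illustrative): refilling a device rx ring, where the free
 * slots may wrap past the end of the ring. Assumes 'ring' has 'ring_size'
 * entries and 'next' is the first slot to refill.
 *
 *   u32 n_refill = vlib_buffer_alloc_to_ring (vm, ring, next, ring_size,
 *                                             n_free_slots);
 */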

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
                  /* pointer to first buffer */
                  u32 * buffers,
                  /* number of buffers to free */
                  u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free

*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
                          /* pointer to first buffer */
                          u32 * buffers,
                          /* number of buffers to free */
                          u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_no_next_cb);

  return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
}

/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
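
/* Usage sketch (illustrative): dropping a single packet versus a batch.
 * Assumes 'bi', 'bis' and 'n' hold buffer indices owned by the caller.
 *
 *   vlib_buffer_free_one (vm, bi);        // frees bi and any chained tail
 *   vlib_buffer_free (vm, bis, n);        // same, for a vector of indices
 */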

/* Add/delete buffer free lists. */
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                                  char *fmt, ...);
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}

/* Find already existing public free list with given size or create one. */
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                                         char *fmt, ...);

/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
                                   vlib_buffer_free_list_t * src);

/* Make sure we have at least given number of unaligned buffers. */
void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
                                           vlib_buffer_free_list_t *
                                           free_list,
                                           uword n_unaligned_buffers);

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
                                  u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = vlib_buffer_get_free_list_index (b);
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

always_inline vlib_buffer_free_list_t *
vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);

  /* Sanity: indices must match. */
  ASSERT (f->index == free_list_index);

  return f;
}

always_inline u32
vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_free_list_t *f =
    vlib_buffer_get_free_list (vm, free_list_index);
  return f->n_data_bytes;
}

void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);

/* Reasonably fast buffer copy routine. */
always_inline void
vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
{
  while (n >= 4)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n > 0)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n -= 1;
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
                          u32 free_list_index,
                          u32 buffer_index, void *data, u32 n_data_bytes);

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
        vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy (vlib_buffer_get_current (d),
               vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy (vlib_buffer_get_current (d),
                   vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
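
/* Usage sketch (illustrative): make a private, writable copy of a packet
 * before modifying it. vlib_buffer_copy () returns 0 when replacement
 * buffers cannot be allocated, so the result must be checked.
 *
 *   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
 *   vlib_buffer_t *c = vlib_buffer_copy (vm, b);
 *   if (c == 0)
 *     return;  // allocation failure; keep or drop the original as needed
 *   u32 ci = vlib_get_buffer_index (vm, c);
 */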

/** \brief Create multiple clones of buffer and store them in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u8) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u8) number of buffers actually cloned, may be
    less than the number requested or zero
*/

always_inline u8
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
                   u8 n_buffers, u16 head_end_offset)
{
  u8 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
        {
          vlib_buffer_t *d;
          d = vlib_buffer_copy (vm, s);
          if (d == 0)
            return i;
          buffers[i] = vlib_get_buffer_index (vm, d);

        }
      return n_buffers;
    }

  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
                                                vlib_buffer_get_free_list_index
                                                (s));
  if (PREDICT_FALSE (n_buffers == 0))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
                                       vlib_buffer_get_free_list_index (s));
      d->total_length_not_including_first_buffer =
        s->total_length_not_including_first_buffer + s->current_length -
        head_end_offset;
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
                   head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
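
/* Usage sketch (illustrative): replicate one packet towards several
 * destinations. Each clone shares the payload tail with the source and gets
 * a private copy of the first 'head_end' bytes it may rewrite per copy.
 * Assumes 'bi' is the source buffer index and 'head_end' covers the headers
 * the caller intends to modify.
 *
 *   u32 clones[8];
 *   u8 n = vlib_buffer_clone (vm, bi, clones, 8, head_end);
 *   // n may be smaller than 8; clones[0..n-1] are valid buffer indices
 */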

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/

always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
                          vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (vlib_buffer_get_free_list_index (head) ==
          vlib_buffer_get_free_list_index (tail));

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  __sync_add_and_fetch (&tail->n_add_refs, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}
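
/* Usage sketch (illustrative): a typical replication path allocates a fresh
 * head buffer per copy, writes the per-copy headers into it, then attaches
 * the shared payload tail by reference instead of copying it. Assumes
 * 'new_head' and 'payload_tail' are valid vlib_buffer_t pointers that came
 * from the same free list.
 *
 *   vlib_buffer_attach_clone (vm, new_head, payload_tail);
 *   // the tail's reference counts are bumped; it is only returned to the
 *   // free list once every attached head has been freed
 */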

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm,
                          vlib_buffer_t * first,
                          vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
                                   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copies data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
                               u32 free_list_index,
                               vlib_buffer_t * first,
                               vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes =
    vlib_buffer_free_list_buffer_size (vm, free_list_index);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
                      n_buffer_bytes - last->current_length -
                      last->current_data);
  clib_memcpy (vlib_buffer_get_current (last) + last->current_length, data,
               len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
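
/* Usage sketch (illustrative): building a packet by appending data, chaining
 * a second pre-allocated buffer when the first one fills up. Assumes 'first'
 * heads the packet, 'bi2' is a freshly allocated buffer index and 'fl' is
 * the free list index the buffers came from.
 *
 *   vlib_buffer_t *last = first;
 *   vlib_buffer_chain_init (first);
 *   u16 n = vlib_buffer_chain_append_data (vm, fl, first, last, data, len);
 *   if (n < len)   // first buffer full, chain another and continue
 *     {
 *       last = vlib_buffer_chain_buffer (vm, first, last, bi2);
 *       vlib_buffer_chain_append_data (vm, fl, first, last,
 *                                      (u8 *) data + n, len - n);
 *     }
 */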

/* Copies data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
                                          u32 free_list_index,
                                          vlib_buffer_t * first,
                                          vlib_buffer_t ** last,
                                          void *data, u16 data_len);
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to physmem
     allocator. */
  u32 min_n_buffers_each_physmem_alloc;

  /* Buffer free list for this template. */
  u32 free_list_index;

  u32 *free_buffers;
} vlib_packet_template_t;

void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
                                             vlib_packet_template_t * t);

void vlib_packet_template_init (vlib_main_t * vm,
                                vlib_packet_template_t * t,
                                void *packet_data,
                                uword n_packet_data_bytes,
                                uword min_n_buffers_each_physmem_alloc,
                                char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
                                       vlib_packet_template_t * t,
                                       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
                        vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  n = s->n_buffer_bytes - s->current_buffer_index;
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          b = vlib_get_buffer (vm, b->next_buffer);
          n += b->current_length;
        }
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
  /* *INDENT-ON* */

  return n;
}

/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
                                vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
          CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
          CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst, template_start),
               STRUCT_MARK_PTR (src, template_start),
               STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
               STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not be zeroed for some buffers. One option is to
   * uncomment the line below (it comes at a cost); the other is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}

always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
                              vlib_buffer_free_list_t * f,
                              u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      vlib_buffer_free_list_t *mf;
      mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
      clib_spinlock_lock (&mf->global_buffers_lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
                       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&mf->global_buffers_lock);
    }
}

always_inline void
vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
                                    vlib_buffer_t * dst1,
                                    vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  clib_memcpy (STRUCT_MARK_PTR (dst0, template_start),
               STRUCT_MARK_PTR (src, template_start),
               STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
               STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  clib_memcpy (STRUCT_MARK_PTR (dst1, template_start),
               STRUCT_MARK_PTR (src, template_start),
               STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
               STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst0->n_add_refs = src->n_add_refs;
  dst1->n_add_refs = src->n_add_refs;

  /* Make sure it really worked. */
#define _(f) ASSERT (dst0->f == src->f); ASSERT (dst1->f == src->f)
  _(current_data);
  _(current_length);
  _(flags);
#undef _

  ASSERT (dst0->total_length_not_including_first_buffer == 0);
  ASSERT (dst1->total_length_not_including_first_buffer == 0);
  ASSERT (dst0->n_add_refs == 0);
  ASSERT (dst1->n_add_refs == 0);
}

#if CLIB_DEBUG > 0
extern u32 *vlib_buffer_state_validation_lock;
extern uword *vlib_buffer_state_validation_hash;
extern void *vlib_buffer_state_heap;
#endif

static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
                    vlib_time_now (vm), bi, bi,
                    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */