blob: e8ccc86f1a9eac7c2aedfbe7e71f4d5b8497a413 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * buffer_funcs.h: VLIB buffer related functions/inlines
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_buffer_funcs_h
41#define included_vlib_buffer_funcs_h
42
43#include <vppinfra/hash.h>
Dave Barachc3a06552018-10-01 09:25:32 -040044#include <vppinfra/fifo.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070045
46/** \file
47 vlib buffer access methods.
48*/
49
50
51/** \brief Translate buffer index into buffer pointer
52
53 @param vm - (vlib_main_t *) vlib main data structure pointer
54 @param buffer_index - (u32) buffer index
55 @return - (vlib_buffer_t *) buffer pointer
Dave Barach9b8ffd92016-07-08 08:13:45 -040056*/
Ed Warnickecb9cada2015-12-08 15:45:58 -070057always_inline vlib_buffer_t *
58vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
59{
Damjan Mariond1274cb2018-03-13 21:32:17 +010060 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +020061 uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
62 ASSERT (offset < bm->buffer_mem_size);
63
64 return uword_to_pointer (bm->buffer_mem_start + offset, void *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070065}
66
/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  /* base address with caller offset pre-folded in; indices only need
     shift-and-add below */
  u64x4 off = u64x4_splat (buffer_main.buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  /* 4-wide step: vectorized when VEC256/VEC128 are available,
     otherwise unrolled scalar */
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      /* no high-lane extend here; rotate the upper pair down first */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* scalar tail for the remaining 0..3 elements */
  while (count)
    {
      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
      b += 1;
      bi += 1;
      count -= 1;
    }
}
131
/** \brief Translate array of buffer indices into buffer pointers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/

static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  /* Zero offset: store pointers to the vlib_buffer_t headers themselves. */
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}
145
Ed Warnickecb9cada2015-12-08 15:45:58 -0700146/** \brief Translate buffer pointer into buffer index
147
148 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400149 @param p - (void *) buffer pointer
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150 @return - (u32) buffer index
Dave Barach9b8ffd92016-07-08 08:13:45 -0400151*/
Damjan Marion04a7f052017-07-10 15:06:17 +0200152
Ed Warnickecb9cada2015-12-08 15:45:58 -0700153always_inline u32
Dave Barach9b8ffd92016-07-08 08:13:45 -0400154vlib_get_buffer_index (vlib_main_t * vm, void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700155{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100156 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion04a7f052017-07-10 15:06:17 +0200157 uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
158 ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
159 ASSERT (offset < bm->buffer_mem_size);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400160 ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
162}
163
/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* subtracting (mem_start - offset) both removes the base and undoes
     the caller-supplied offset in one step */
  u64x4 off4 = u64x4_splat (buffer_main.buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  /* scalar tail for the remaining 0..3 elements */
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}
225
/** \brief Translate array of buffer pointers into buffer indices

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  /* Zero offset: pointers are to the vlib_buffer_t headers themselves. */
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}
239
Ed Warnickecb9cada2015-12-08 15:45:58 -0700240/** \brief Get next buffer in buffer linklist, or zero for end of list.
241
242 @param vm - (vlib_main_t *) vlib main data structure pointer
243 @param b - (void *) buffer pointer
244 @return - (vlib_buffer_t *) next buffer, or NULL
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246always_inline vlib_buffer_t *
247vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
248{
249 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
Dave Barach9b8ffd92016-07-08 08:13:45 -0400250 ? vlib_get_buffer (vm, b->next_buffer) : 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700251}
252
Dave Barach9b8ffd92016-07-08 08:13:45 -0400253uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
254 vlib_buffer_t * b_first);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700255
256/** \brief Get length in bytes of the buffer chain
257
258 @param vm - (vlib_main_t *) vlib main data structure pointer
259 @param b - (void *) buffer pointer
260 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400261*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262always_inline uword
263vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
264{
Damjan Marion072401e2017-07-13 18:53:27 +0200265 uword len = b->current_length;
266
267 if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
268 return len;
269
270 if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
271 return len + b->total_length_not_including_first_buffer;
272
273 return vlib_buffer_length_in_chain_slow_path (vm, b);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700274}
275
276/** \brief Get length in bytes of the buffer index buffer chain
277
278 @param vm - (vlib_main_t *) vlib main data structure pointer
279 @param bi - (u32) buffer index
280 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400281*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700282always_inline uword
283vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
284{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400285 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700286 return vlib_buffer_length_in_chain (vm, b);
287}
288
289/** \brief Copy buffer contents to memory
290
291 @param vm - (vlib_main_t *) vlib main data structure pointer
Chris Luked4024f52016-09-06 09:32:36 -0400292 @param buffer_index - (u32) buffer index
Ed Warnickecb9cada2015-12-08 15:45:58 -0700293 @param contents - (u8 *) memory, <strong>must be large enough</strong>
294 @return - (uword) length of buffer chain
Dave Barach9b8ffd92016-07-08 08:13:45 -0400295*/
Ed Warnickecb9cada2015-12-08 15:45:58 -0700296always_inline uword
297vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
298{
299 uword content_len = 0;
300 uword l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301 vlib_buffer_t *b;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700302
303 while (1)
304 {
305 b = vlib_get_buffer (vm, buffer_index);
306 l = b->current_length;
Dave Barach178cf492018-11-13 16:34:13 -0500307 clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700308 content_len += l;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400309 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700310 break;
311 buffer_index = b->next_buffer;
312 }
313
314 return content_len;
315}
316
/** \brief Get physical address of buffer data start
    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (uword) result of vlib_physmem_get_pa() for b->data
*/
always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}
322
/** \brief Get physical address of current data position in buffer
    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t *) buffer pointer
    @return - (uword) physical address of b->data advanced by b->current_data
*/
always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}
328
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
/* NB: vm and bi are each expanded exactly once, so side-effecting
   arguments are safe here. */
#define vlib_prefetch_buffer_with_index(vm,bi,type) \
  do { \
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi); \
    vlib_prefetch_buffer_header (_b, type); \
  } while (0)
342
343#if 0
344/* Iterate over known allocated vlib bufs. You probably do not want
345 * to do this!
346 @param vm the vlib_main_t
347 @param bi found allocated buffer index
348 @param body operation to perform on buffer index
349 function executes body for each allocated buffer index
350 */
351#define vlib_buffer_foreach_allocated(vm,bi,body) \
352do { \
353 vlib_main_t * _vmain = (vm); \
354 vlib_buffer_main_t * _bmain = &_vmain->buffer_main; \
355 hash_pair_t * _vbpair; \
356 hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({ \
357 if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) { \
358 (bi) = _vbpair->key; \
359 body; \
360 } \
361 })); \
362} while (0)
363#endif
364
/* Debug classification of a buffer index, kept in the
   buffer_known_hash table (see vlib_buffer_is_known /
   vlib_buffer_set_known_state). */
typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
374
Damjan Marionc8a26c62017-11-24 20:15:23 +0100375void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
376 uword n_buffers,
377 vlib_buffer_known_state_t
378 expected_state);
379
Ed Warnickecb9cada2015-12-08 15:45:58 -0700380always_inline vlib_buffer_known_state_t
Steven899a84b2018-01-29 20:09:09 -0800381vlib_buffer_is_known (u32 buffer_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700382{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100383 vlib_buffer_main_t *bm = &buffer_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700384
Damjan Marion6b0f5892017-07-27 04:01:24 -0400385 clib_spinlock_lock (&bm->buffer_known_hash_lockp);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400386 uword *p = hash_get (bm->buffer_known_hash, buffer_index);
Damjan Marion6b0f5892017-07-27 04:01:24 -0400387 clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700388 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
389}
390
/* Record the known (debug) state of a buffer index in the shared
   buffer_known_hash table; updates are serialized by the hash
   spinlock. */
always_inline void
vlib_buffer_set_known_state (u32 buffer_index,
			     vlib_buffer_known_state_t state)
{
  vlib_buffer_main_t *bm = &buffer_main;

  clib_spinlock_lock (&bm->buffer_known_hash_lockp);
  hash_set (bm->buffer_known_hash, buffer_index, state);
  clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
401
402/* Validates sanity of a single buffer.
403 Returns format'ed vector with error message if any. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400404u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
405 uword follow_chain);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700406
/* Round a requested size up via round_pow2 with sizeof (vlib_buffer_t)
   as the alignment quantum. */
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700412
Damjan Mariondac03522018-02-01 15:30:13 +0100413always_inline vlib_buffer_free_list_index_t
Damjan Marion072401e2017-07-13 18:53:27 +0200414vlib_buffer_get_free_list_index (vlib_buffer_t * b)
415{
Damjan Mariondac03522018-02-01 15:30:13 +0100416 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NON_DEFAULT_FREELIST))
417 return b->free_list_index;
418
419 return 0;
Damjan Marion072401e2017-07-13 18:53:27 +0200420}
421
422always_inline void
Damjan Mariondac03522018-02-01 15:30:13 +0100423vlib_buffer_set_free_list_index (vlib_buffer_t * b,
424 vlib_buffer_free_list_index_t index)
Damjan Marion072401e2017-07-13 18:53:27 +0200425{
Damjan Mariondac03522018-02-01 15:30:13 +0100426 if (PREDICT_FALSE (index))
427 {
428 b->flags |= VLIB_BUFFER_NON_DEFAULT_FREELIST;
429 b->free_list_index = index;
430 }
431 else
432 b->flags &= ~VLIB_BUFFER_NON_DEFAULT_FREELIST;
Damjan Marion072401e2017-07-13 18:53:27 +0200433}
434
/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param index - (vlib_buffer_free_list_index_t) free list to draw from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers,
				  vlib_buffer_free_list_index_t index)
{
  vlib_buffer_main_t *bm = &buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 *src;
  uword len;

  ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);

  fl = pool_elt_at_index (vm->buffer_free_list_pool, index);

  len = vec_len (fl->buffers);

  /* Slow path: free list is short; ask the fill callback to top it up. */
  if (PREDICT_FALSE (len < n_buffers))
    {
      bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
      if (PREDICT_FALSE ((len = vec_len (fl->buffers)) == 0))
	return 0;

      /* even if fill free list didn't manage to refill free list
         we should give what we have */
      n_buffers = clib_min (len, n_buffers);

      /* following code is intentionally duplicated to allow compiler
         to optimize fast path when n_buffers is constant value */
      src = fl->buffers + len - n_buffers;
      clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
      _vec_len (fl->buffers) -= n_buffers;

      /* Verify that buffers are known free. */
      vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				       VLIB_BUFFER_KNOWN_FREE);

      return n_buffers;
    }

  /* Fast path: take the n_buffers at the tail of the free-list vector. */
  src = fl->buffers + len - n_buffers;
  clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
  _vec_len (fl->buffers) -= n_buffers;

  /* Verify that buffers are known free. */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_FREE);

  return n_buffers;
}
493
/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  /* Convenience wrapper: always draws from the default free list. */
  return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
					   VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700508
Damjan Marionc58408c2018-01-18 14:54:04 +0100509/** \brief Allocate buffers into ring
510
511 @param vm - (vlib_main_t *) vlib main data structure pointer
512 @param buffers - (u32 * ) buffer index ring
513 @param start - (u32) first slot in the ring
514 @param ring_size - (u32) ring size
515 @param n_buffers - (u32) number of buffers requested
516 @return - (u32) number of buffers actually allocated, may be
517 less than the number requested or zero
518*/
519always_inline u32
520vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
521 u32 ring_size, u32 n_buffers)
522{
523 u32 n_alloc;
524
525 ASSERT (n_buffers <= ring_size);
526
527 if (PREDICT_TRUE (start + n_buffers <= ring_size))
528 return vlib_buffer_alloc (vm, ring + start, n_buffers);
529
530 n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);
531
532 if (PREDICT_TRUE (n_alloc == ring_size - start))
533 n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);
534
535 return n_alloc;
536}
537
Ed Warnickecb9cada2015-12-08 15:45:58 -0700538/** \brief Free buffers
539 Frees the entire buffer chain for each buffer
540
541 @param vm - (vlib_main_t *) vlib main data structure pointer
542 @param buffers - (u32 * ) buffer index array
543 @param n_buffers - (u32) number of buffers to free
544
545*/
Damjan Marion878c6092017-01-04 13:19:27 +0100546always_inline void
547vlib_buffer_free (vlib_main_t * vm,
548 /* pointer to first buffer */
549 u32 * buffers,
550 /* number of buffers to free */
551 u32 n_buffers)
552{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100553 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100554
555 ASSERT (bm->cb.vlib_buffer_free_cb);
556
557 return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
558}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700559
560/** \brief Free buffers, does not free the buffer chain for each buffer
561
562 @param vm - (vlib_main_t *) vlib main data structure pointer
563 @param buffers - (u32 * ) buffer index array
564 @param n_buffers - (u32) number of buffers to free
565
566*/
Damjan Marion878c6092017-01-04 13:19:27 +0100567always_inline void
568vlib_buffer_free_no_next (vlib_main_t * vm,
569 /* pointer to first buffer */
570 u32 * buffers,
571 /* number of buffers to free */
572 u32 n_buffers)
573{
Damjan Mariond1274cb2018-03-13 21:32:17 +0100574 vlib_buffer_main_t *bm = &buffer_main;
Damjan Marion878c6092017-01-04 13:19:27 +0100575
576 ASSERT (bm->cb.vlib_buffer_free_no_next_cb);
577
578 return bm->cb.vlib_buffer_free_no_next_cb (vm, buffers, n_buffers);
579}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700580
/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  /* vlib_buffer_free follows chained segments (see its doc above). */
  vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
}
592
Damjan Mariona3731492018-02-25 22:50:39 +0100593/** \brief Free buffers from ring
594
595 @param vm - (vlib_main_t *) vlib main data structure pointer
596 @param buffers - (u32 * ) buffer index ring
597 @param start - (u32) first slot in the ring
598 @param ring_size - (u32) ring size
599 @param n_buffers - (u32) number of buffers
600*/
601always_inline void
602vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
603 u32 ring_size, u32 n_buffers)
604{
605 ASSERT (n_buffers <= ring_size);
606
607 if (PREDICT_TRUE (start + n_buffers <= ring_size))
608 {
609 vlib_buffer_free (vm, ring + start, n_buffers);
610 }
611 else
612 {
613 vlib_buffer_free (vm, ring + start, ring_size - start);
614 vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
615 }
616}
617
Damjan Marioncef1db92018-03-28 18:27:38 +0200618/** \brief Free buffers from ring without freeing tail buffers
619
620 @param vm - (vlib_main_t *) vlib main data structure pointer
621 @param buffers - (u32 * ) buffer index ring
622 @param start - (u32) first slot in the ring
623 @param ring_size - (u32) ring size
624 @param n_buffers - (u32) number of buffers
625*/
626always_inline void
627vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
628 u32 ring_size, u32 n_buffers)
629{
630 ASSERT (n_buffers <= ring_size);
631
632 if (PREDICT_TRUE (start + n_buffers <= ring_size))
633 {
Damjan Marion4a973932018-06-09 19:29:16 +0200634 vlib_buffer_free_no_next (vm, ring + start, n_buffers);
Damjan Marioncef1db92018-03-28 18:27:38 +0200635 }
636 else
637 {
638 vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
639 vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
640 }
641}
Damjan Mariona3731492018-02-25 22:50:39 +0100642
Ed Warnickecb9cada2015-12-08 15:45:58 -0700643/* Add/delete buffer free lists. */
Damjan Mariondac03522018-02-01 15:30:13 +0100644vlib_buffer_free_list_index_t vlib_buffer_create_free_list (vlib_main_t * vm,
645 u32 n_data_bytes,
646 char *fmt, ...);
/* Delete a free list previously created with
   vlib_buffer_create_free_list; dispatches to the registered callback. */
always_inline void
vlib_buffer_delete_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_index_t free_list_index)
{
  vlib_buffer_main_t *bm = &buffer_main;

  ASSERT (bm->cb.vlib_buffer_delete_free_list_cb);

  bm->cb.vlib_buffer_delete_free_list_cb (vm, free_list_index);
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700657
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100658/* Make sure we have at least given number of unaligned buffers. */
659void vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
660 vlib_buffer_free_list_t *
661 free_list,
662 uword n_unaligned_buffers);
663
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100664always_inline vlib_buffer_free_list_t *
665vlib_buffer_get_buffer_free_list (vlib_main_t * vm, vlib_buffer_t * b,
Damjan Mariondac03522018-02-01 15:30:13 +0100666 vlib_buffer_free_list_index_t * index)
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100667{
Damjan Mariondac03522018-02-01 15:30:13 +0100668 vlib_buffer_free_list_index_t i;
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100669
Damjan Marion072401e2017-07-13 18:53:27 +0200670 *index = i = vlib_buffer_get_free_list_index (b);
Damjan Mariond1274cb2018-03-13 21:32:17 +0100671 return pool_elt_at_index (vm->buffer_free_list_pool, i);
Damjan Marion8a6a3b22017-01-17 14:12:42 +0100672}
673
Ed Warnickecb9cada2015-12-08 15:45:58 -0700674always_inline vlib_buffer_free_list_t *
Damjan Mariondac03522018-02-01 15:30:13 +0100675vlib_buffer_get_free_list (vlib_main_t * vm,
676 vlib_buffer_free_list_index_t free_list_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700677{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400678 vlib_buffer_free_list_t *f;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700679
Damjan Mariond1274cb2018-03-13 21:32:17 +0100680 f = pool_elt_at_index (vm->buffer_free_list_pool, free_list_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700681
682 /* Sanity: indices must match. */
683 ASSERT (f->index == free_list_index);
684
685 return f;
686}
687
688always_inline u32
Damjan Mariondac03522018-02-01 15:30:13 +0100689vlib_buffer_free_list_buffer_size (vlib_main_t * vm,
690 vlib_buffer_free_list_index_t index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700691{
Damjan Mariondac03522018-02-01 15:30:13 +0100692 vlib_buffer_free_list_t *f = vlib_buffer_get_free_list (vm, index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700693 return f->n_data_bytes;
694}
695
Dave Barach9b8ffd92016-07-08 08:13:45 -0400696void vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700697
698/* Reasonably fast buffer copy routine. */
699always_inline void
700vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
701{
702 while (n >= 4)
703 {
704 dst[0] = src[0];
705 dst[1] = src[1];
706 dst[2] = src[2];
707 dst[3] = src[3];
708 dst += 4;
709 src += 4;
710 n -= 4;
711 }
712 while (n > 0)
713 {
714 dst[0] = src[0];
715 dst += 1;
716 src += 1;
717 n -= 1;
718 }
719}
720
Ed Warnickecb9cada2015-12-08 15:45:58 -0700721/* Append given data to end of buffer, possibly allocating new buffers. */
722u32 vlib_buffer_add_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100723 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400724 u32 buffer_index, void *data, u32 n_data_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700725
/* Duplicate all buffers in a chain.
   Returns the head of the newly allocated chain, or 0 if the required
   number of buffers could not be allocated (nothing is leaked in that
   case).  Metadata copied per segment: current_data, current_length and
   the NEXT_PRESENT/TOTAL_LENGTH_VALID flag bits; opaque/opaque2 are
   copied for the first segment only.
   NOTE(review): new_buffers is a VLA sized by the chain length --
   assumes chains are short enough for stack allocation; confirm against
   callers. */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
  int i;

  /* count segments in the source chain */
  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
783
/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    Clones share the packet tail (reference-counted via n_add_refs) while
    each clone gets a private copy of the first head_end_offset bytes so
    per-clone headers can be rewritten independently.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /* Cloning an already-cloned buffer is not supported. */
  ASSERT (s->n_add_refs == 0);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);

  /* Packet too small to be worth sharing: fall back to full copies.
     The source itself is handed out as clone 0. */
  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  /* Allocation failure: return the count produced so far. */
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);

	}
      return n_buffers;
    }

  /* Single clone requested: just hand back the source buffer. */
  if (PREDICT_FALSE (n_buffers == 1))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  /* May return fewer head buffers than requested. */
  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
						vlib_buffer_get_free_list_index
						(s));

  /* Build one private head per clone; each head chains to the shared
     source buffer (src_buffer) as its tail. */
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      d->current_data = s->current_data;
      d->current_length = head_end_offset;
      vlib_buffer_set_free_list_index (d,
				       vlib_buffer_get_free_list_index (s));

      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      /* The clone always chains to the shared tail; any external header
         is private to the source and must not appear valid on clones. */
      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  /* The shared part of the source starts after the copied head bytes. */
  vlib_buffer_advance (s, head_end_offset);
  /* Every segment of the shared tail carries the extra reference count. */
  s->n_add_refs = n_buffers - 1;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->n_add_refs = n_buffers - 1;
    }

  return n_buffers;
}
865
Neale Ranns8f36e4a2018-01-11 09:02:01 -0800866/** \brief Create multiple clones of buffer and store them
867 in the supplied array
868
869 @param vm - (vlib_main_t *) vlib main data structure pointer
870 @param src_buffer - (u32) source buffer index
871 @param buffers - (u32 * ) buffer index array
872 @param n_buffers - (u16) number of buffer clones requested (<=256)
873 @param head_end_offset - (u16) offset relative to current position
874 where packet head ends
875 @return - (u16) number of buffers actually cloned, may be
876 less than the number requested or zero
877*/
878always_inline u16
879vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
880 u16 n_buffers, u16 head_end_offset)
881{
882 vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
883 u16 n_cloned = 0;
884
885 while (n_buffers > 256)
886 {
887 vlib_buffer_t *copy;
888 copy = vlib_buffer_copy (vm, s);
889 n_cloned += vlib_buffer_clone_256 (vm,
890 vlib_get_buffer_index (vm, copy),
891 (buffers + n_cloned),
892 256, head_end_offset);
893 n_buffers -= 256;
894 }
895 n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
896 buffers + n_cloned,
897 n_buffers, head_end_offset);
898
899 return n_cloned;
900}
901
Damjan Marionc47ed032017-01-25 14:18:03 +0100902/** \brief Attach cloned tail to the buffer
903
904 @param vm - (vlib_main_t *) vlib main data structure pointer
905 @param head - (vlib_buffer_t *) head buffer
906 @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
907*/
908
909always_inline void
910vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
911 vlib_buffer_t * tail)
912{
913 ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
Damjan Marion072401e2017-07-13 18:53:27 +0200914 ASSERT (vlib_buffer_get_free_list_index (head) ==
915 vlib_buffer_get_free_list_index (tail));
Damjan Marionc47ed032017-01-25 14:18:03 +0100916
917 head->flags |= VLIB_BUFFER_NEXT_PRESENT;
918 head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
919 head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
920 head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
921 head->next_buffer = vlib_get_buffer_index (vm, tail);
922 head->total_length_not_including_first_buffer = tail->current_length +
923 tail->total_length_not_including_first_buffer;
924
925next_segment:
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000926 clib_atomic_add_fetch (&tail->n_add_refs, 1);
Damjan Marionc47ed032017-01-25 14:18:03 +0100927
928 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
929 {
930 tail = vlib_get_buffer (vm, tail->next_buffer);
931 goto next_segment;
932 }
933}
934
Pierre Pfister328e99b2016-02-12 13:18:42 +0000935/* Initializes the buffer as an empty packet with no chained buffers. */
936always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400937vlib_buffer_chain_init (vlib_buffer_t * first)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000938{
939 first->total_length_not_including_first_buffer = 0;
940 first->current_length = 0;
941 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
942 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000943}
944
945/* The provided next_bi buffer index is appended to the end of the packet. */
946always_inline vlib_buffer_t *
Eyal Barib688fb12018-11-12 16:13:49 +0200947vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000948{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400949 vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000950 last->next_buffer = next_bi;
951 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
952 next_buffer->current_length = 0;
953 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000954 return next_buffer;
955}
956
957/* Increases or decreases the packet length.
958 * It does not allocate or deallocate new buffers.
959 * Therefore, the added length must be compatible
960 * with the last buffer. */
961always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400962vlib_buffer_chain_increase_length (vlib_buffer_t * first,
963 vlib_buffer_t * last, i32 len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000964{
965 last->current_length += len;
966 if (first != last)
967 first->total_length_not_including_first_buffer += len;
Pierre Pfister328e99b2016-02-12 13:18:42 +0000968}
969
970/* Copy data to the end of the packet and increases its length.
971 * It does not allocate new buffers.
972 * Returns the number of copied bytes. */
973always_inline u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400974vlib_buffer_chain_append_data (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100975 vlib_buffer_free_list_index_t free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400976 vlib_buffer_t * first,
977 vlib_buffer_t * last, void *data, u16 data_len)
Pierre Pfister328e99b2016-02-12 13:18:42 +0000978{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400979 u32 n_buffer_bytes =
980 vlib_buffer_free_list_buffer_size (vm, free_list_index);
981 ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
982 u16 len = clib_min (data_len,
983 n_buffer_bytes - last->current_length -
984 last->current_data);
Dave Barach178cf492018-11-13 16:34:13 -0500985 clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
986 data, len);
Dave Barach9b8ffd92016-07-08 08:13:45 -0400987 vlib_buffer_chain_increase_length (first, last, len);
Pierre Pfister328e99b2016-02-12 13:18:42 +0000988 return len;
989}
990
991/* Copy data to the end of the packet and increases its length.
992 * Allocates additional buffers from the free list if necessary.
993 * Returns the number of copied bytes.
994 * 'last' value is modified whenever new buffers are allocated and
995 * chained and points to the last buffer in the chain. */
996u16
Dave Barach9b8ffd92016-07-08 08:13:45 -0400997vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
Damjan Mariondac03522018-02-01 15:30:13 +0100998 vlib_buffer_free_list_index_t
999 free_list_index,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001000 vlib_buffer_t * first,
Damjan Mariondac03522018-02-01 15:30:13 +01001001 vlib_buffer_t ** last, void *data,
1002 u16 data_len);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001003void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001004
Dave Barach9b8ffd92016-07-08 08:13:45 -04001005format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
1006 format_vlib_buffer_contents;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001007
/* Template for stamping out many identical packets cheaply. */
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  /* Buffer free list for this template. */
  vlib_buffer_free_list_index_t free_list_index;

  /* Cached buffer indices available for this template; presumably
     refilled by vlib_packet_template_get_packet_helper — verify. */
  u32 *free_buffers;
} vlib_packet_template_t;
1021
1022void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
1023 vlib_packet_template_t * t);
1024
1025void vlib_packet_template_init (vlib_main_t * vm,
1026 vlib_packet_template_t * t,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001027 void *packet_data,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001028 uword n_packet_data_bytes,
Damjan Mariond1274cb2018-03-13 21:32:17 +01001029 uword min_n_buffers_each_alloc,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001030 char *fmt, ...);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001031
Dave Barach9b8ffd92016-07-08 08:13:45 -04001032void *vlib_packet_template_get_packet (vlib_main_t * vm,
1033 vlib_packet_template_t * t,
1034 u32 * bi_result);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001035
/* Release the resources owned by a packet template.  Only the packet
   data vector is freed; cached buffers are owned by the free list. */
always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}
1041
/* Total number of bytes available for unserialization: the unread part
   of the current stream buffer, the remainder of the last partially
   consumed buffer chain, plus every chain queued in the rx fifo. */
always_inline u32
unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
{
  serialize_stream_t *s = &m->stream;
  vlib_serialize_buffer_main_t *sm
    = uword_to_pointer (m->stream.data_function_opaque,
			vlib_serialize_buffer_main_t *);
  vlib_main_t *vm = sm->vlib_main;
  u32 n, *f;

  /* Unread bytes in the current buffer. */
  n = s->n_buffer_bytes - s->current_buffer_index;
  /* ~0 means no partially-consumed buffer is pending. */
  if (sm->last_buffer != ~0)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, sm->last_buffer);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n += b->current_length;
	}
    }

  /* *INDENT-OFF* */
  clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
    n += vlib_buffer_index_length_in_chain (vm, f[0]);
  }));
/* *INDENT-ON* */

  return n;
}
1071
/* Set a buffer quickly into "uninitialized" state. We want this to
   be extremely cheap and arrange for all fields that need to be
   initialized to be in the first 128 bits of the buffer. */
always_inline void
vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
				vlib_buffer_free_list_t * fl)
{
  vlib_buffer_t *src = &fl->buffer_init_template;

  /* Make sure vlib_buffer_t is cacheline aligned and sized */
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline1) ==
	  CLIB_CACHE_LINE_BYTES);
  ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline2) ==
	  CLIB_CACHE_LINE_BYTES * 2);

  /* Make sure buffer template is sane. */
  ASSERT (fl->index == vlib_buffer_get_free_list_index (src));

  /* Bulk-copy the [template_start, template_end) byte range from the
     free list's init template in one shot. */
  clib_memcpy_fast (STRUCT_MARK_PTR (dst, template_start),
		    STRUCT_MARK_PTR (src, template_start),
		    STRUCT_OFFSET_OF (vlib_buffer_t, template_end) -
		    STRUCT_OFFSET_OF (vlib_buffer_t, template_start));

  /* Not in the first 16 octets. */
  dst->n_add_refs = src->n_add_refs;
  vlib_buffer_set_free_list_index (dst, fl->index);

  /* Make sure it really worked. */
#define _(f) ASSERT (dst->f == src->f);
  _(current_data);
  _(current_length);
  _(flags);
#undef _
  /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
  /* total_length_not_including_first_buffer is not in the template anymore
   * so it may actually not zeroed for some buffers. One option is to
   * uncomment the line lower (comes at a cost), the other, is to just not
   * care */
  /* dst->total_length_not_including_first_buffer = 0; */
  ASSERT (dst->n_add_refs == 0);
}
1114
/* Return a buffer to free list 'f', optionally re-initializing it from
   the free list's template.  When the per-list cache grows past four
   frames, one frame's worth of the oldest entries is spilled to the
   shared buffer pool under its spinlock. */
always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
  vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);

  if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
    {
      clib_spinlock_lock (&bp->lock);
      /* keep last stored buffers, as they are more likely hot in the cache */
      vec_add_aligned (bp->buffers, f->buffers, VLIB_FRAME_SIZE,
		       CLIB_CACHE_LINE_BYTES);
      vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
      f->n_alloc -= VLIB_FRAME_SIZE;
      clib_spinlock_unlock (&bp->lock);
    }
}
1138
Ed Warnickecb9cada2015-12-08 15:45:58 -07001139#if CLIB_DEBUG > 0
Damjan Marion6a7acc22016-12-19 16:28:36 +01001140extern u32 *vlib_buffer_state_validation_lock;
1141extern uword *vlib_buffer_state_validation_hash;
1142extern void *vlib_buffer_state_heap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001143#endif
1144
/* Debug-only check that buffer 'b' is in the 'expected' state
   (busy/free) according to the global state-tracking hash; panics on a
   mismatch (double free / use of freed buffer).  No-op in release
   builds. */
static inline void
vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  uword *p;
  void *oldheap;

  /* The validation hash lives on its own heap. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin until we own the validation lock. */
  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
    ;

  p = hash_get (vlib_buffer_state_validation_hash, b);

  /* If we don't know about b, declare it to be in the expected state */
  if (!p)
    {
      hash_set (vlib_buffer_state_validation_hash, b, expected);
      goto out;
    }

  if (p[0] != expected)
    {
      void cj_stop (void);
      u32 bi;
      vlib_main_t *vm = &vlib_global_main;

      /* Freeze the circular-journal trace before reporting. */
      cj_stop ();

      bi = vlib_get_buffer_index (vm, b);

      /* Restore the caller's heap before warning/panicking. */
      clib_mem_set_heap (oldheap);
      clib_warning ("%.6f buffer %llx (%d): %s, not %s",
		    vlib_time_now (vm), bi,
		    p[0] ? "busy" : "free", expected ? "busy" : "free");
      os_panic ();
    }
out:
  /* Publish hash updates before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1188
/* Debug-only: record that buffer 'b' is now in state 'expected'
   (busy/free) in the global state-tracking hash.  No-op in release
   builds. */
static inline void
vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
{
#if CLIB_DEBUG > 0
  void *oldheap;

  /* The validation hash lives on its own heap. */
  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  /* Spin until we own the validation lock. */
  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
    ;

  hash_set (vlib_buffer_state_validation_hash, b, expected);

  /* Publish the hash update before releasing the lock. */
  CLIB_MEMORY_BARRIER ();
  *vlib_buffer_state_validation_lock = 0;
  clib_mem_set_heap (oldheap);
#endif
}
1207
/** minimum data size of first buffer in a buffer chain */
#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)

/**
 * @brief compress buffer chain in a way where the first buffer is at least
 * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 * @param[in,out] discard_vector - vector of buffer indexes which were removed
 * from the chain
 */
always_inline void
vlib_buffer_chain_compress (vlib_main_t * vm,
			    vlib_buffer_t * first, u32 ** discard_vector)
{
  if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      /* this is already big enough or not a chain */
      return;
    }
  /* probe free list to find allocated buffer size to avoid overfill */
  vlib_buffer_free_list_index_t index;
  vlib_buffer_free_list_t *free_list =
    vlib_buffer_get_buffer_free_list (vm, first, &index);

  /* Never try to pull in more bytes than the first buffer can hold. */
  u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
				  free_list->n_data_bytes -
				  first->current_data);
  do
    {
      /* Move bytes from the second segment into the first until the
         first is big enough or the chain runs out. */
      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
      u32 need = want_first_size - first->current_length;
      u32 amount_to_copy = clib_min (need, second->current_length);
      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
			first->current_length,
			vlib_buffer_get_current (second), amount_to_copy);
      first->current_length += amount_to_copy;
      vlib_buffer_advance (second, amount_to_copy);
      /* Bytes moved into the head no longer count toward the tail total. */
      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
	{
	  first->total_length_not_including_first_buffer -= amount_to_copy;
	}
      /* Second segment fully drained: unlink it and queue it for the
         caller to free. */
      if (!second->current_length)
	{
	  vec_add1 (*discard_vector, first->next_buffer);
	  if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      first->next_buffer = second->next_buffer;
	    }
	  else
	    {
	      first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	}
    }
  while ((first->current_length < want_first_size) &&
	 (first->flags & VLIB_BUFFER_NEXT_PRESENT));
}
1269
/**
 * @brief linearize buffer chain - the first buffer is filled, if needed,
 * buffers are allocated and filled, returns free space in last buffer or
 * negative on failure
 *
 * @param[in] vm - vlib_main
 * @param[in,out] first - first buffer in chain
 */
always_inline int
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
{
  vlib_buffer_t *b = first;
  vlib_buffer_free_list_t *fl =
    vlib_buffer_get_free_list (vm, vlib_buffer_get_free_list_index (b));
  u32 buf_len = fl->n_data_bytes;
  // free buffer chain starting from the second buffer
  int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
  u32 chain_to_free = b->next_buffer;

  u32 len = vlib_buffer_length_in_chain (vm, b);
  u32 free_len = buf_len - b->current_data - b->current_length;
  int alloc_len = clib_max (len - free_len, 0);	//use the free len in the first buffer
  // NOTE(review): n_buffers can be 0 here, giving a zero-length VLA
  // (undefined behavior in standard C) — presumably tolerated by the
  // supported compilers; verify.
  int n_buffers = (alloc_len + buf_len - 1) / buf_len;
  u32 new_buffers[n_buffers];

  u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
  if (n_alloc != n_buffers)
    {
      vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
      return -1;
    }

  /* Walk the old chain, copying each segment's payload into the write
     cursor 'b', appending fresh buffers (taken from the end of
     new_buffers) whenever 'b' fills up. */
  vlib_buffer_t *s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      int d_free_len = buf_len - b->current_data - b->current_length;
      ASSERT (d_free_len >= 0);
      // chain buf and split write
      u32 copy_len = clib_min (d_free_len, s->current_length);
      u8 *d = vlib_buffer_put_uninit (b, copy_len);
      clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
      int rest = s->current_length - copy_len;
      if (rest > 0)
	{
	  //prev buf is full
	  ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
	  ASSERT (n_buffers > 0);
	  b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
	  //make full use of the new buffers
	  b->current_data = 0;
	  d = vlib_buffer_put_uninit (b, rest);
	  clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
	}
    }
  /* Old second-and-later segments are no longer referenced. */
  vlib_buffer_free (vm, &chain_to_free, free_count);
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  if (b == first)		/* no buffers added */
    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  ASSERT (len == vlib_buffer_length_in_chain (vm, first));
  /* All allocated buffers must have been consumed by the copy loop. */
  ASSERT (n_buffers == 0);
  return buf_len - b->current_data - b->current_length;
}
1333
Ed Warnickecb9cada2015-12-08 15:45:58 -07001334#endif /* included_vlib_buffer_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001335
1336/*
1337 * fd.io coding-style-patch-verification: ON
1338 *
1339 * Local Variables:
1340 * eval: (c-set-style "gnu")
1341 * End:
1342 */