/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Allocate/free network buffers.
 */

#include <rte_config.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_version.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/devices/dpdk/dpdk_priv.h>


STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

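/* Number of 32-bit buffer indices that fit in one vlib_copy_unit_t.
   The aligned free-list vector is kept a multiple of this so indices
   can be moved in whole vlib_copy_unit_t words (e.g. 2 indices per
   64-bit copy unit). */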
#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))

/* Make sure we have at least given number of unaligned buffers. */
static void
fill_unaligned (vlib_main_t * vm,
		vlib_buffer_free_list_t * free_list,
		uword n_unaligned_buffers)
{
  word la = vec_len (free_list->aligned_buffers);
  word lu = vec_len (free_list->unaligned_buffers);

  /* Aligned buffers come in copy-sized chunks. */
  ASSERT (la % BUFFERS_PER_COPY == 0);

  ASSERT (la >= n_unaligned_buffers);

  while (lu < n_unaligned_buffers)
    {
      /* Move BUFFERS_PER_COPY buffers from the end of the aligned
         vector to the unaligned vector. */
      vec_add (free_list->unaligned_buffers,
	       free_list->aligned_buffers + la - BUFFERS_PER_COPY,
	       BUFFERS_PER_COPY);
      la -= BUFFERS_PER_COPY;
      lu += BUFFERS_PER_COPY;
    }
  _vec_len (free_list->aligned_buffers) = la;
}

/* After a free, the aligned vector may not hold a whole number of
   copy-sized chunks; restore that invariant here. */
static void
trim_aligned (vlib_buffer_free_list_t * f)
{
  uword l, n_trim;

  /* Add unaligned to aligned before trim. */
  l = vec_len (f->unaligned_buffers);
  if (l > 0)
    {
      vec_add_aligned (f->aligned_buffers, f->unaligned_buffers, l,
		       /* align */ sizeof (vlib_copy_unit_t));

      _vec_len (f->unaligned_buffers) = 0;
    }

  /* Remove unaligned buffers from end of aligned vector and save for next trim. */
  l = vec_len (f->aligned_buffers);
  n_trim = l % BUFFERS_PER_COPY;
  if (n_trim)
    {
      /* Trim aligned -> unaligned. */
      vec_add (f->unaligned_buffers, f->aligned_buffers + l - n_trim, n_trim);

      /* Remove from aligned. */
      _vec_len (f->aligned_buffers) = l - n_trim;
    }
}

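/* Fold all buffers from src into dst; used when a free list is being
   deleted and another list with the same buffer size can absorb it. */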
static void
merge_free_lists (vlib_buffer_free_list_t * dst,
		  vlib_buffer_free_list_t * src)
{
  uword l;
  u32 *d;

  trim_aligned (src);
  trim_aligned (dst);

  l = vec_len (src->aligned_buffers);
  if (l > 0)
    {
      vec_add2_aligned (dst->aligned_buffers, d, l,
			/* align */ sizeof (vlib_copy_unit_t));
      clib_memcpy (d, src->aligned_buffers, l * sizeof (d[0]));
      vec_free (src->aligned_buffers);
    }

  l = vec_len (src->unaligned_buffers);
  if (l > 0)
    {
      vec_add (dst->unaligned_buffers, src->unaligned_buffers, l);
      vec_free (src->unaligned_buffers);
    }
}

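/* Look up an existing free list with the given (rounded) buffer size;
   returns its index, or ~0 if there is none. */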
always_inline u32
dpdk_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword *p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

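/* Return every buffer still sitting on the free list to its DPDK
   mempool, then release the list's own vectors. */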
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
  u32 i;
  struct rte_mbuf *mb;
  vlib_buffer_t *b;

  for (i = 0; i < vec_len (f->unaligned_buffers); i++)
    {
      b = vlib_get_buffer (vm, f->unaligned_buffers[i]);
      mb = rte_mbuf_from_vlib_buffer (b);
      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
      rte_pktmbuf_free (mb);
    }
  for (i = 0; i < vec_len (f->aligned_buffers); i++)
    {
      b = vlib_get_buffer (vm, f->aligned_buffers[i]);
      mb = rte_mbuf_from_vlib_buffer (b);
      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
      rte_pktmbuf_free (mb);
    }
  vec_free (f->name);
  vec_free (f->unaligned_buffers);
  vec_free (f->aligned_buffers);
}

/* Delete buffer free list. */
static void
dpdk_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;
  u32 merge_index;
  int i;

  ASSERT (os_get_cpu_number () == 0);

  f = vlib_buffer_get_free_list (vm, free_list_index);

  merge_index = dpdk_buffer_get_free_list_with_size (vm, f->n_data_bytes);
  if (merge_index != ~0 && merge_index != free_list_index)
    {
      merge_free_lists (pool_elt_at_index (bm->buffer_free_list_pool,
					   merge_index), f);
    }

  del_free_list (vm, f);

  /* Poison it. */
  memset (f, 0xab, sizeof (f[0]));

  pool_put (bm->buffer_free_list_pool, f);

  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      bm = vlib_mains[i]->buffer_main;
      f = vlib_buffer_get_free_list (vlib_mains[i], free_list_index);
      memset (f, 0xab, sizeof (f[0]));
      pool_put (bm->buffer_free_list_pool, f);
    }
}

/* Make sure free list has at least given number of free buffers. */
static uword
fill_free_list (vlib_main_t * vm,
		vlib_buffer_free_list_t * fl, uword min_free_buffers)
{
  dpdk_main_t *dm = &dpdk_main;
  vlib_buffer_t *b;
  int n, i;
  u32 bi;
  unsigned socket_id = rte_socket_id ();
  struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
  struct rte_mbuf *mb;

  /* Too early? */
  if (PREDICT_FALSE (rmp == 0))
    return 0;

  trim_aligned (fl);

  /* Already have enough free buffers on free list? */
  n = min_free_buffers - vec_len (fl->aligned_buffers);
  if (n <= 0)
    return min_free_buffers;

  /* Always allocate round number of buffers. */
  n = round_pow2 (n, BUFFERS_PER_COPY);

  /* Always allocate new buffers in reasonably large sized chunks. */
  n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);

  vec_validate (vm->mbuf_alloc_list, n - 1);

  if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
    return 0;

  _vec_len (vm->mbuf_alloc_list) = n;

  for (i = 0; i < n; i++)
    {
      mb = vm->mbuf_alloc_list[i];

      ASSERT (rte_mbuf_refcnt_read (mb) == 0);
      rte_mbuf_refcnt_set (mb, 1);

      b = vlib_buffer_from_rte_mbuf (mb);
      bi = vlib_get_buffer_index (vm, b);

      vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t));

      vlib_buffer_init_for_free_list (b, fl);

      if (fl->buffer_init_function)
	fl->buffer_init_function (vm, fl, &bi, 1);
    }

  fl->n_alloc += n;

  return n;
}

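/* Offset, in u32 slots, of pointer x from the previous
   vlib_copy_unit_t boundary (0 when x is copy-aligned). */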
always_inline uword
copy_alignment (u32 * x)
{
  return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY;
}

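/* Allocate buffers from a free list in three phases: copy single
   indices until the destination pointer is copy-aligned, bulk-copy
   whole vlib_copy_unit_t words from the aligned vector, then copy the
   unaligned tail from the unaligned vector. */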
static u32
alloc_from_free_list (vlib_main_t * vm,
		      vlib_buffer_free_list_t * free_list,
		      u32 * alloc_buffers, u32 n_alloc_buffers)
{
  u32 *dst, *u_src;
  uword u_len, n_left;
  uword n_unaligned_start, n_unaligned_end, n_filled;

  n_left = n_alloc_buffers;
  dst = alloc_buffers;
  n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
		       & (BUFFERS_PER_COPY - 1));

  n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
  if (n_filled == 0)
    return 0;

  n_left = n_filled < n_left ? n_filled : n_left;
  n_alloc_buffers = n_left;

  if (n_unaligned_start >= n_left)
    {
      n_unaligned_start = n_left;
      n_unaligned_end = 0;
    }
  else
    n_unaligned_end = copy_alignment (dst + n_alloc_buffers);

  fill_unaligned (vm, free_list, n_unaligned_start + n_unaligned_end);

  u_len = vec_len (free_list->unaligned_buffers);
  u_src = free_list->unaligned_buffers + u_len - 1;

  if (n_unaligned_start)
    {
      uword n_copy = n_unaligned_start;
      if (n_copy > n_left)
	n_copy = n_left;
      n_left -= n_copy;

      while (n_copy > 0)
	{
	  *dst++ = *u_src--;
	  n_copy--;
	  u_len--;
	}

      /* Now dst should be aligned. */
      if (n_left > 0)
	ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
    }

  /* Aligned copy. */
  {
    vlib_copy_unit_t *d, *s;
    uword n_copy;

    if (vec_len (free_list->aligned_buffers) <
	((n_left / BUFFERS_PER_COPY) * BUFFERS_PER_COPY))
      abort ();

    n_copy = n_left / BUFFERS_PER_COPY;
    n_left = n_left % BUFFERS_PER_COPY;

    /* Remove buffers from aligned free list. */
    _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;

    s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
    d = (vlib_copy_unit_t *) dst;

    /* Fast path loop. */
    while (n_copy >= 4)
      {
	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];
	d[3] = s[3];
	n_copy -= 4;
	s += 4;
	d += 4;
      }

    while (n_copy >= 1)
      {
	d[0] = s[0];
	n_copy -= 1;
	s += 1;
	d += 1;
      }

    dst = (void *) d;
  }

  /* Unaligned copy. */
  ASSERT (n_unaligned_end == n_left);
  while (n_left > 0)
    {
      *dst++ = *u_src--;
      n_left--;
      u_len--;
    }

  if (!free_list->unaligned_buffers)
    ASSERT (u_len == 0);
  else
    _vec_len (free_list->unaligned_buffers) = u_len;

  return n_alloc_buffers;
}

/* Allocate a given number of buffers into given array.
   Returns the number actually allocated, which will be either zero or
   the number requested. */
u32
dpdk_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  return alloc_from_free_list
    (vm,
     pool_elt_at_index (bm->buffer_free_list_pool,
			VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX),
     buffers, n_buffers);
}
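
/*
 * Minimal usage sketch (hypothetical caller): once the callbacks below
 * are registered, allocation normally goes through the generic
 * vlib_buffer_alloc() wrapper rather than calling this directly:
 *
 *   u32 bi[32];
 *   u32 n_alloc = vlib_buffer_alloc (vm, bi, 32);
 *   if (n_alloc == 0)
 *     ... mempool empty or not yet created ...
 */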

u32
dpdk_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;
  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);
  return alloc_from_free_list (vm, f, buffers, n_buffers);
}

always_inline void
add_buffer_to_free_list (vlib_main_t * vm,
			 vlib_buffer_free_list_t * f,
			 u32 buffer_index, u8 do_init)
{
  vlib_buffer_t *b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE (do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->aligned_buffers, buffer_index,
		    sizeof (vlib_copy_unit_t));
}

always_inline vlib_buffer_free_list_t *
buffer_get_free_list (vlib_main_t * vm, vlib_buffer_t * b, u32 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 i;

  *index = i = b->free_list_index;
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}

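/* Common free path: give the optional global free callback first crack
   at the vector, then either recycle each buffer onto its free list
   (remembering the list so its announce callback runs once, below) or
   hand the underlying mbuf straight back to DPDK. */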
static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm,
			 u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;
  u32 fi;
  int i;
  u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
	     u32 follow_buffer_next);

  cb = bm->buffer_free_callback;

  if (PREDICT_FALSE (cb != 0))
    n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);

  if (!n_buffers)
    return;

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *b;
      struct rte_mbuf *mb;

      b = vlib_get_buffer (vm, buffers[i]);

      fl = buffer_get_free_list (vm, b, &fi);

      /* The only current use of this callback: multicast recycle */
      if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
	{
	  int j;

	  add_buffer_to_free_list
	    (vm, fl, buffers[i], (b->flags & VLIB_BUFFER_RECYCLE) == 0);

	  for (j = 0; j < vec_len (bm->announce_list); j++)
	    {
	      if (fl == bm->announce_list[j])
		goto already_announced;
	    }
	  vec_add1 (bm->announce_list, fl);
	already_announced:
	  ;
	}
      else
	{
	  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
	    {
	      mb = rte_mbuf_from_vlib_buffer (b);
	      ASSERT (rte_mbuf_refcnt_read (mb) == 1);
	      rte_pktmbuf_free (mb);
	    }
	}
    }
  if (vec_len (bm->announce_list))
    {
      vlib_buffer_free_list_t *fl;
      for (i = 0; i < vec_len (bm->announce_list); i++)
	{
	  fl = bm->announce_list[i];
	  fl->buffers_added_to_freelist_function (vm, fl);
	}
      _vec_len (bm->announce_list) = 0;
    }
}

static void
dpdk_buffer_free (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers,
			   /* follow_buffer_next */ 1);
}

static void
dpdk_buffer_free_no_next (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers,
			   /* follow_buffer_next */ 0);
}

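/* Packet-template init callback: record the template data under the
   worker barrier; the remaining arguments are unused here and kept
   only to match the generic callback signature. */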
static void
dpdk_packet_template_init (vlib_main_t * vm,
			   void *vt,
			   void *packet_data,
			   uword n_packet_data_bytes,
			   uword min_n_buffers_each_physmem_alloc, u8 * name)
{
  vlib_packet_template_t *t = (vlib_packet_template_t *) vt;

  vlib_worker_thread_barrier_sync (vm);
  memset (t, 0, sizeof (t[0]));

  vec_add (t->packet_data, packet_data, n_packet_data_bytes);

  vlib_worker_thread_barrier_release (vm);
}

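/* Create (or reuse) the rte_mempool for a CPU socket and fold its
   memory range into the physmem virtual region, so that vlib buffer
   indices (32-bit cache-line offsets from virtual.start) can address
   every mbuf in the pool. */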
clib_error_t *
vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
			 unsigned socket_id)
{
  dpdk_main_t *dm = &dpdk_main;
  vlib_physmem_main_t *vpm = &vm->physmem_main;
  struct rte_mempool *rmp;
  int i;

  vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);

  /* pool already exists, nothing to do */
  if (dm->pktmbuf_pools[socket_id])
    return 0;

  u8 *pool_name = format (0, "mbuf_pool_socket%u%c", socket_id, 0);

  rmp = rte_pktmbuf_pool_create ((char *) pool_name,	/* pool name */
				 num_mbufs,	/* number of mbufs */
				 512,	/* cache size */
				 VLIB_BUFFER_HDR_SIZE,	/* priv size */
				 VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE,	/* dataroom size */
				 socket_id);	/* cpu socket */

  if (rmp)
    {
      {
	uword this_pool_end;
	uword this_pool_start;
	uword this_pool_size;
	uword save_vpm_start, save_vpm_end, save_vpm_size;
	struct rte_mempool_memhdr *memhdr;

	this_pool_start = ~0ULL;
	this_pool_end = 0LL;

	STAILQ_FOREACH (memhdr, &rmp->mem_list, next)
	{
	  if (((uword) (memhdr->addr + memhdr->len)) > this_pool_end)
	    this_pool_end = (uword) (memhdr->addr + memhdr->len);
	  if (((uword) memhdr->addr) < this_pool_start)
	    this_pool_start = (uword) (memhdr->addr);
	}
	ASSERT (this_pool_start < ~0ULL && this_pool_end > 0);
	this_pool_size = this_pool_end - this_pool_start;

	if (CLIB_DEBUG > 1)
	  {
	    clib_warning ("%s: pool start %llx pool end %llx pool size %lld",
			  pool_name, this_pool_start, this_pool_end,
			  this_pool_size);
	    clib_warning
	      ("before: virtual.start %llx virtual.end %llx virtual.size %lld",
	       vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
	  }

	save_vpm_start = vpm->virtual.start;
	save_vpm_end = vpm->virtual.end;
	save_vpm_size = vpm->virtual.size;

	if ((this_pool_start < vpm->virtual.start) || vpm->virtual.start == 0)
	  vpm->virtual.start = this_pool_start;
	if (this_pool_end > vpm->virtual.end)
	  vpm->virtual.end = this_pool_end;

	vpm->virtual.size = vpm->virtual.end - vpm->virtual.start;

	if (CLIB_DEBUG > 1)
	  {
	    clib_warning
	      ("after: virtual.start %llx virtual.end %llx virtual.size %lld",
	       vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
	  }

	/* A buffer index is a 32-bit count of cache lines from
	   virtual.start, so the whole region must fit in
	   1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES) bytes. */
	if ((u64) vpm->virtual.size >
	    ((u64) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES)))
	  {
	    clib_warning ("physmem: virtual size out of range!");
	    vpm->virtual.start = save_vpm_start;
	    vpm->virtual.end = save_vpm_end;
	    vpm->virtual.size = save_vpm_size;
	    rmp = 0;
	  }
      }
      if (rmp)
	{
	  dm->pktmbuf_pools[socket_id] = rmp;
	  vec_free (pool_name);
	  return 0;
	}
    }

  vec_free (pool_name);

  /* no usable pool for this socket, try to use a pool from another one */
  for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
    {
      if (dm->pktmbuf_pools[i])
	{
	  clib_warning
	    ("WARNING: Failed to allocate mempool for CPU socket %u. "
	     "Threads running on socket %u will use socket %u mempool.",
	     socket_id, socket_id, i);
	  dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
	  return 0;
	}
    }

  return clib_error_return (0, "failed to allocate mempool on socket %u",
			    socket_id);
}


#if CLIB_DEBUG > 0

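/* Debug-only buffer state validation: a private heap plus a hash table
   tracking per-buffer state, guarded by a simple vector lock; used by
   the buffer validation checks elsewhere in vlib. */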
u32 *vlib_buffer_state_validation_lock;
uword *vlib_buffer_state_validation_hash;
void *vlib_buffer_state_heap;

static clib_error_t *
buffer_state_validation_init (vlib_main_t * vm)
{
  void *oldheap;

  vlib_buffer_state_heap = mheap_alloc (0, 10 << 20);

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
  vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
			CLIB_CACHE_LINE_BYTES);
  clib_mem_set_heap (oldheap);
  return 0;
}

VLIB_INIT_FUNCTION (buffer_state_validation_init);
#endif

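/* Register the DPDK-specific buffer management routines with vlib so
   they replace the default (non-DPDK) buffer functions. */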
static vlib_buffer_callbacks_t callbacks = {
  .vlib_buffer_alloc_cb = &dpdk_buffer_alloc,
  .vlib_buffer_alloc_from_free_list_cb = &dpdk_buffer_alloc_from_free_list,
  .vlib_buffer_free_cb = &dpdk_buffer_free,
  .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
  .vlib_packet_template_init_cb = &dpdk_packet_template_init,
  .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
};

static clib_error_t *
dpdk_buffer_init (vlib_main_t * vm)
{
  vlib_buffer_cb_register (vm, &callbacks);
  return 0;
}

VLIB_INIT_FUNCTION (dpdk_buffer_init);

/** @endcond */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */