buffers: reinitialize metadata, add additional validation
- DPDK overwrites buffer metadata as part of rte_pktmbuf_init(...), so we
  need to reinitialize it afterwards (see the sketch below)
- additional checks are added to ensure the ref_count of an allocated
  buffer is never < 1 (see the validation sketch further below)
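
The following is not part of this patch; it is a minimal standalone C
sketch of why the template copy has to be repeated after the mempool
object initializer runs. buf_meta_t and foreign_obj_init() are
hypothetical stand-ins for the vlib_buffer_t metadata and
rte_pktmbuf_init() respectively.

  #include <stdint.h>
  #include <string.h>
  #include <assert.h>

  /* Stand-in for the vlib buffer metadata that the foreign initializer
     clobbers. */
  typedef struct
  {
    uint8_t ref_count;
    uint8_t buffer_pool_index;
    uint32_t flags;
  } buf_meta_t;

  /* Stand-in for a foreign initializer (e.g. rte_pktmbuf_init) that
     rewrites the whole object, wiping any metadata set up earlier. */
  static void
  foreign_obj_init (void *obj, size_t sz)
  {
    memset (obj, 0, sz);
  }

  int
  main (void)
  {
    buf_meta_t template = { .ref_count = 1, .buffer_pool_index = 0 };
    buf_meta_t b;

    /* First pass: apply our template. */
    b = template;

    /* The foreign initializer runs afterwards and overwrites it... */
    foreign_obj_init (&b, sizeof (b));
    assert (b.ref_count == 0);	/* metadata is gone */

    /* ...so the template must be re-applied, as the second vec_foreach in
       dpdk_buffer_pool_init() does after rte_mempool_obj_iter(). */
    b = template;
    assert (b.ref_count == 1);
    return 0;
  }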
Change-Id: Ida336f81c4723e8f2e0ad4a70cb7b1ecfff978a0
Signed-off-by: Damjan Marion <damarion@cisco.com>
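
For reference, a minimal standalone sketch of the index-to-pointer
arithmetic and the range/ref_count checks in the spirit of
vlib_buffer_ptr_from_index() and vlib_buffer_validate(). It is not part of
the patch; buf_ptr_from_index(), buf_validate() and the 64-byte cache-line
constant are illustrative assumptions.

  #include <stdint.h>
  #include <assert.h>

  #define LOG2_CACHE_LINE_BYTES 6	/* assume 64-byte cache lines */

  /* Index -> pointer: the buffer index counts cache lines from the start
     of the buffer memory region. */
  static inline void *
  buf_ptr_from_index (uintptr_t mem_start, uint32_t index, uintptr_t offset)
  {
    offset += ((uintptr_t) index) << LOG2_CACHE_LINE_BYTES;
    return (void *) (mem_start + offset);
  }

  /* Range and ref_count checks on an allocated buffer. */
  static inline void
  buf_validate (uintptr_t mem_start, uintptr_t mem_size, uintptr_t data_size,
		uintptr_t ptr, uint8_t ref_count)
  {
    assert (ref_count > 0);	/* allocated buffers are never < 1 */
    assert (ptr >= mem_start);
    assert (ptr < mem_start + mem_size - data_size);
  }

  int
  main (void)
  {
    static uint8_t region[64 * 1024] __attribute__ ((aligned (64)));
    uintptr_t start = (uintptr_t) region;

    void *b = buf_ptr_from_index (start, 4, 0);	/* 5th cache line */
    assert ((uintptr_t) b == start + (4u << LOG2_CACHE_LINE_BYTES));

    buf_validate (start, sizeof (region), 2048, (uintptr_t) b, 1);
    return 0;
  }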
diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c
index 73310ef..21340e5 100644
--- a/src/plugins/dpdk/buffer.c
+++ b/src/plugins/dpdk/buffer.c
@@ -42,6 +42,7 @@
clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
+ uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
struct rte_mempool *mp, *nmp;
dpdk_mempool_private_t priv;
enum rte_iova_mode iova_mode;
@@ -88,24 +89,35 @@
iova_mode = rte_eal_iova_mode ();
/* populate mempool object buffer header */
+ /* *INDENT-OFF* */
vec_foreach (bi, bp->buffers)
- {
- struct rte_mempool_objhdr *hdr;
- vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
- struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
- hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
- hdr->mp = mp;
- hdr->iova = (iova_mode == RTE_IOVA_VA) ?
- pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
- STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
- STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
- mp->populated_size++;
- nmp->populated_size++;
- }
+ {
+ struct rte_mempool_objhdr *hdr;
+ vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
+ struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
+ hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
+ hdr->mp = mp;
+ hdr->iova = (iova_mode == RTE_IOVA_VA) ?
+ pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
+ STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
+ STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
+ mp->populated_size++;
+ nmp->populated_size++;
+ }
+ /* *INDENT-ON* */
/* call the object initializers */
rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
+ /* *INDENT-OFF* */
+ vec_foreach (bi, bp->buffers)
+ {
+ vlib_buffer_t *b;
+ b = vlib_buffer_ptr_from_index (buffer_mem_start, *bi, 0);
+ vlib_buffer_copy_template (b, &bp->buffer_template);
+ }
+ /* *INDENT-ON* */
+
/* map DMA pages if at least one physical device exists */
if (rte_eth_dev_count_avail ())
{
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index 3e19411..701ddde 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -569,6 +569,7 @@
bi = vlib_get_buffer_index (vm, (vlib_buffer_t *) p);
vec_add1_aligned (bp->buffers, bi, CLIB_CACHE_LINE_BYTES);
+ vlib_get_buffer (vm, bi);
bp->n_buffers += 1;
}
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index ce8d1ef..37ddcd4 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -47,6 +47,34 @@
vlib buffer access methods.
*/
+always_inline void
+vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
+{
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_pool_t *bp;
+
+ /* reference count in allocated buffer always must be 1 or higher */
+ ASSERT (b->ref_count > 0);
+
+ /* verify that buffer pointer is from buffer memory range */
+ ASSERT (pointer_to_uword (b) >= bm->buffer_mem_start);
+ ASSERT (pointer_to_uword (b) < bm->buffer_mem_start + bm->buffer_mem_size -
+ VLIB_BUFFER_DATA_SIZE);
+
+ /* verify that buffer pool index is valid */
+ bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
+ ASSERT (pointer_to_uword (b) >= bp->start);
+ ASSERT (pointer_to_uword (b) < bp->start + bp->size -
+ VLIB_BUFFER_DATA_SIZE);
+}
+
+always_inline void *
+vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
+ uword offset)
+{
+ offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
+ return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
+}
/** \brief Translate buffer index into buffer pointer
@@ -58,10 +86,11 @@
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
vlib_buffer_main_t *bm = vm->buffer_main;
- uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
- ASSERT (offset < bm->buffer_mem_size);
+ vlib_buffer_t *b;
- return uword_to_pointer (bm->buffer_mem_start + offset, void *);
+ b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
+ vlib_buffer_validate (vm, b);
+ return b;
}
static_always_inline void
@@ -108,9 +137,7 @@
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
i32 offset)
{
-#if defined (CLIB_HAVE_VEC256) || defined (CLIB_HAVE_VEC128)
uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
-#endif
#ifdef CLIB_HAVE_VEC256
u64x4 off = u64x4_splat (buffer_mem_start + offset);
/* if count is not const, compiler will not unroll while loop
@@ -146,10 +173,10 @@
u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
- b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
- b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
- b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
- b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
+ b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
+ b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
+ b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
+ b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
#endif
b += 4;
bi += 4;
@@ -157,7 +184,7 @@
}
while (count)
{
- b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
+ b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
b += 1;
bi += 1;
count -= 1;
@@ -710,6 +737,11 @@
vlib_buffer_validate_alloc_free (vm, buffers, 4,
VLIB_BUFFER_KNOWN_ALLOCATED);
+ vlib_buffer_validate (vm, b[0]);
+ vlib_buffer_validate (vm, b[1]);
+ vlib_buffer_validate (vm, b[2]);
+ vlib_buffer_validate (vm, b[3]);
+
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
@@ -748,9 +780,7 @@
}
}
- ASSERT (pointer_to_uword (b[0]) >= bp->start &&
- pointer_to_uword (b[0]) <
- bp->start + bp->size - (bp->data_size + sizeof (*b[0])));
+ vlib_buffer_validate (vm, b[0]);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);