buffers: buffer allocation improvements

- pass buffer pool name through a format string and variadic arguments (usage sketch below)
- make buffers naturally aligned
- fix calculation of the total number of buffers
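
For reference, a minimal sketch of the new call shape (the wrapper function,
pool name and the pre-existing physmem map index are hypothetical; only
vlib_buffer_pool_create() and vlib_buffer_get_default_data_size() are taken
from this change):

  /* hypothetical caller: create one pool per NUMA node, naming it via the
   * new format-string arguments instead of a pre-formatted name vector */
  static clib_error_t *
  my_create_numa_pool (vlib_main_t *vm, u32 numa_node, u32 physmem_map_index)
  {
    u8 index;

    /* the pool name is now formatted inside vlib_buffer_pool_create () */
    index = vlib_buffer_pool_create (vm, vlib_buffer_get_default_data_size (vm),
                                     physmem_map_index, "my-pool-numa-%d",
                                     numa_node);

    /* (u8) ~0 still signals that the maximum number of buffer pools (255)
     * was reached */
    if (index == (u8) ~0)
      return clib_error_return (0, "maximum number of buffer pools reached");

    return 0;
  }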

Type: improvement
Change-Id: I6aebf249ebd67823b4632ac08905bfa3aa7d1ee5
Signed-off-by: Damjan Marion <damarion@cisco.com>
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index 304d1ab..0068659 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -484,16 +484,18 @@
 }
 
 u8
-vlib_buffer_pool_create (vlib_main_t * vm, char *name, u32 data_size,
-			 u32 physmem_map_index)
+vlib_buffer_pool_create (vlib_main_t *vm, u32 data_size, u32 physmem_map_index,
+			 char *fmt, ...)
 {
   vlib_buffer_main_t *bm = vm->buffer_main;
   vlib_buffer_pool_t *bp;
   vlib_physmem_map_t *m = vlib_physmem_get_map (vm, physmem_map_index);
   uword start = pointer_to_uword (m->base);
   uword size = (uword) m->n_pages << m->log2_page_size;
-  uword i, j;
-  u32 alloc_size, n_alloc_per_page;
+  uword page_mask = ~pow2_mask (m->log2_page_size);
+  u8 *p;
+  u32 alloc_size;
+  va_list va;
 
   if (vec_len (bm->buffer_pools) >= 255)
     return ~0;
@@ -531,48 +533,57 @@
   bp->buffer_template.buffer_pool_index = bp->index;
   bp->buffer_template.ref_count = 1;
   bp->physmem_map_index = physmem_map_index;
-  bp->name = format (0, "%s%c", name, 0);
   bp->data_size = data_size;
   bp->numa_node = m->numa_node;
+  bp->log2_page_size = m->log2_page_size;
+
+  va_start (va, fmt);
+  bp->name = va_format (0, fmt, &va);
+  va_end (va);
 
   vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
 			CLIB_CACHE_LINE_BYTES);
 
   alloc_size = vlib_buffer_alloc_size (bm->ext_hdr_size, data_size);
-  n_alloc_per_page = (1ULL << m->log2_page_size) / alloc_size;
+  bp->alloc_size = alloc_size;
 
   /* preallocate buffer indices memory */
-  bp->n_buffers = m->n_pages * n_alloc_per_page;
-  bp->buffers = clib_mem_alloc_aligned (bp->n_buffers * sizeof (u32),
-					CLIB_CACHE_LINE_BYTES);
+  bp->buffers = clib_mem_alloc_aligned (
+    round_pow2 ((size / alloc_size) * sizeof (u32), CLIB_CACHE_LINE_BYTES),
+    CLIB_CACHE_LINE_BYTES);
 
   clib_spinlock_init (&bp->lock);
 
-  for (j = 0; j < m->n_pages; j++)
-    for (i = 0; i < n_alloc_per_page; i++)
-      {
-	u8 *p;
-	u32 bi;
+  p = m->base;
 
-	p = m->base + (j << m->log2_page_size) + i * alloc_size;
-	p += bm->ext_hdr_size;
+  /* start with naturally aligned address */
+  p += alloc_size - (uword) p % alloc_size;
 
-	/*
-	 * Waste 1 buffer (maximum) so that 0 is never a valid buffer index.
-	 * Allows various places to ASSERT (bi != 0). Much easier
-	 * than debugging downstream crashes in successor nodes.
-	 */
-	if (p == m->base)
-	  continue;
+  /*
+   * Waste 1 buffer (maximum) so that 0 is never a valid buffer index.
+   * Allows various places to ASSERT (bi != 0). Much easier
+   * than debugging downstream crashes in successor nodes.
+   */
+  if (p == m->base)
+    p += alloc_size;
 
-	vlib_buffer_copy_template ((vlib_buffer_t *) p, &bp->buffer_template);
+  for (; p < (u8 *) m->base + size - alloc_size; p += alloc_size)
+    {
+      vlib_buffer_t *b;
+      u32 bi;
 
-	bi = vlib_get_buffer_index (vm, (vlib_buffer_t *) p);
+      /* skip if buffer spans across page boundary */
+      if (((uword) p & page_mask) != ((uword) (p + alloc_size) & page_mask))
+	continue;
 
-	bp->buffers[bp->n_avail++] = bi;
+      b = (vlib_buffer_t *) (p + bm->ext_hdr_size);
+      vlib_buffer_copy_template (b, &bp->buffer_template);
+      bi = vlib_get_buffer_index (vm, b);
+      bp->buffers[bp->n_avail++] = bi;
+      vlib_get_buffer (vm, bi);
+    }
 
-	vlib_get_buffer (vm, bi);
-      }
+  bp->n_buffers = bp->n_avail;
 
   return bp->index;
 }
@@ -694,7 +705,6 @@
   vlib_buffer_main_t *bm = vm->buffer_main;
   u32 physmem_map_index;
   clib_error_t *error;
-  u8 *name = 0;
 
   if (bm->log2_page_size == CLIB_MEM_PAGE_SZ_UNKNOWN)
     {
@@ -725,14 +735,12 @@
     return error;
 
 buffer_pool_create:
-  name = format (name, "default-numa-%d%c", numa_node, 0);
-  *index = vlib_buffer_pool_create (vm, (char *) name,
-				    vlib_buffer_get_default_data_size (vm),
-				    physmem_map_index);
+  *index =
+    vlib_buffer_pool_create (vm, vlib_buffer_get_default_data_size (vm),
+			     physmem_map_index, "default-numa-%d", numa_node);
 
   if (*index == (u8) ~ 0)
     error = clib_error_return (0, "maximum number of buffer pools reached");
-  vec_free (name);
 
 
   return error;