svm: split fifo into private and shared structs

Type: improvement

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: Id8e77e8b2623be719fd43a95e181eaa5b7df2b6e
diff --git a/src/svm/fifo_segment.c b/src/svm/fifo_segment.c
index b4c3cbe..19fd052 100644
--- a/src/svm/fifo_segment.c
+++ b/src/svm/fifo_segment.c
@@ -14,6 +14,7 @@
  */
 
 #include <svm/fifo_segment.h>
+#include <vppinfra/mem.h>
 
 static inline void *
 fsh_alloc_aligned (fifo_segment_header_t *fsh, uword size, uword align)
@@ -239,7 +240,7 @@
 int
 fifo_segment_init (fifo_segment_t * fs)
 {
-  u32 align = 8, offset = 2 * 4096, slices_sz;
+  u32 align = 8, offset = 2 * 4096, slices_sz, i;
   uword max_fifo, seg_start, seg_sz;
   fifo_segment_header_t *fsh;
   ssvm_shared_header_t *sh;
@@ -270,6 +271,11 @@
   fs->max_byte_index = fsh->max_byte_index;
   fs->h = sh->opaque[0] = fsh;
 
+  vec_validate (fs->slices, fs->n_slices - 1);
+  for (i = 0; i < fs->n_slices; i++)
+    fs->slices[i].fifos =
+      clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32);
+
   sh->ready = 1;
   return (0);
 }
@@ -343,6 +349,9 @@
     goto done;
 
   fs->max_byte_index = fsh->max_byte_index;
+  vec_validate (fs->slices, 0);
+  fs->slices[0].fifos =
+    clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32);
 
 done:
   vec_add1 (a->new_segment_indices, fs - sm->segments);
@@ -352,6 +361,7 @@
 void
 fifo_segment_delete (fifo_segment_main_t * sm, fifo_segment_t * s)
 {
+  fifo_segment_cleanup (s);
   ssvm_delete (&s->ssvm);
   clib_memset (s, 0xfe, sizeof (*s));
   pool_put (sm->segments, s);
@@ -478,7 +488,7 @@
 fsh_try_alloc_fifo_hdr_batch (fifo_segment_header_t * fsh,
 			      fifo_segment_slice_t * fss, u32 batch_size)
 {
-  svm_fifo_t *f;
+  svm_fifo_shared_t *f;
   uword size;
   u8 *fmem;
   int i;
@@ -494,7 +504,7 @@
   /* Carve fifo hdr space */
   for (i = 0; i < batch_size; i++)
     {
-      f = (svm_fifo_t *) fmem;
+      f = (svm_fifo_shared_t *) fmem;
       memset (f, 0, sizeof (*f));
       f->next = fss->free_fifos;
       fss->free_fifos = f;
@@ -555,11 +565,10 @@
   return fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch_size);
 }
 
-static svm_fifo_t *
-fsh_try_alloc_fifo_hdr (fifo_segment_header_t * fsh,
-			fifo_segment_slice_t * fss)
+static svm_fifo_shared_t *
+fsh_try_alloc_fifo_hdr (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss)
 {
-  svm_fifo_t *f;
+  svm_fifo_shared_t *f;
 
   if (!fss->free_fifos)
     {
@@ -642,40 +651,41 @@
  * - single fifo allocation
  * - grab multiple fifo chunks from freelists
  */
-static svm_fifo_t *
-fs_try_alloc_fifo (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss,
-		   u32 data_bytes)
+static svm_fifo_shared_t *
+fs_try_alloc_fifo (fifo_segment_header_t *fsh, u32 slice_index, u32 data_bytes)
 {
+  fifo_segment_slice_t *fss;
   u32 fl_index, min_size;
   svm_fifo_chunk_t *c;
-  svm_fifo_t *f = 0;
+  svm_fifo_shared_t *sf = 0;
 
+  fss = fsh_slice_get (fsh, slice_index);
   min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096);
   fl_index = fs_freelist_for_size (min_size);
 
   if (!fss_chunk_fl_index_is_valid (fss, fl_index))
     return 0;
 
-  f = fsh_try_alloc_fifo_hdr (fsh, fss);
-  if (!f)
+  sf = fsh_try_alloc_fifo_hdr (fsh, fss);
+  if (!sf)
     return 0;
 
   c = fsh_try_alloc_chunk (fsh, fss, min_size);
   if (!c)
     {
-      f->next = fss->free_fifos;
-      fss->free_fifos = f;
+      sf->next = fss->free_fifos;
+      fss->free_fifos = sf;
       return 0;
     }
 
-  f->start_chunk = c;
+  sf->start_chunk = c;
   while (c->next)
     c = c->next;
-  f->end_chunk = c;
-  f->size = data_bytes;
-  f->fs_hdr = fsh;
+  sf->end_chunk = c;
+  sf->size = data_bytes;
+  sf->slice_index = slice_index;
 
-  return f;
+  return sf;
 }
 
 svm_fifo_chunk_t *
@@ -720,6 +730,36 @@
   fsh_slice_collect_chunks (fsh, fss, c);
 }
 
+svm_fifo_t *
+fs_fifo_alloc (fifo_segment_t *fs, u32 slice_index)
+{
+  fifo_slice_private_t *pfss = &fs->slices[slice_index];
+  svm_fifo_t *f;
+
+  f = clib_mem_bulk_alloc (pfss->fifos);
+  clib_memset (f, 0, sizeof (*f));
+  return f;
+}
+
+void
+fs_fifo_free (fifo_segment_t *fs, svm_fifo_t *f)
+{
+  u32 slice_index = f->shr->slice_index;
+  fifo_slice_private_t *pfss;
+
+  pfss = &fs->slices[slice_index];
+  clib_mem_bulk_free (pfss->fifos, f);
+}
+
+void
+fifo_segment_cleanup (fifo_segment_t *fs)
+{
+  int slice_index;
+
+  for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
+    clib_mem_bulk_destroy (fs->slices[slice_index].fifos);
+}
+
 /**
  * Allocate fifo in fifo segment
  */
@@ -729,6 +769,7 @@
 {
   fifo_segment_header_t *fsh = fs->h;
   fifo_segment_slice_t *fss;
+  svm_fifo_shared_t *sf;
   svm_fifo_t *f = 0;
 
   ASSERT (slice_index < fs->n_slices);
@@ -736,15 +777,18 @@
   if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_fifo_size))
     return 0;
 
-  fss = fsh_slice_get (fsh, slice_index);
-  f = fs_try_alloc_fifo (fsh, fss, data_bytes);
-  if (!f)
+  sf = fs_try_alloc_fifo (fsh, slice_index, data_bytes);
+  if (!sf)
     goto done;
 
-  f->slice_index = slice_index;
+  f = fs_fifo_alloc (fs, slice_index);
+  f->fs_hdr = fsh;
+  f->shr = sf;
 
   svm_fifo_init (f, data_bytes);
 
+  fss = fsh_slice_get (fsh, slice_index);
+
   /* If rx fifo type add to active fifos list. When cleaning up segment,
    * we need a list of active sessions that should be disconnected. Since
    * both rx and tx fifos keep pointers to the session, it's enough to track
@@ -762,6 +806,19 @@
   return (f);
 }
 
+svm_fifo_t *
+fifo_segment_alloc_fifo_w_shared (fifo_segment_t *fs, svm_fifo_shared_t *sf)
+{
+  svm_fifo_t *f = fs_fifo_alloc (fs, 0);
+  f->fs_hdr = fs->h;
+  f->shr = sf;
+
+  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
+  f->segment_index = SVM_FIFO_INVALID_INDEX;
+  f->refcnt = 1;
+  return f;
+}
+
 /**
  * Free fifo allocated in fifo segment
  */
@@ -770,13 +827,35 @@
 {
   fifo_segment_header_t *fsh = fs->h;
   fifo_segment_slice_t *fss;
+  svm_fifo_shared_t *sf;
 
   ASSERT (f->refcnt > 0);
 
   if (--f->refcnt > 0)
     return;
 
-  fss = fsh_slice_get (fsh, f->slice_index);
+  /*
+   * Cleanup shared state
+   */
+
+  sf = f->shr;
+  fss = fsh_slice_get (fsh, sf->slice_index);
+
+  /* Free fifo chunks */
+  fsh_slice_collect_chunks (fsh, fss, sf->start_chunk);
+
+  sf->start_chunk = sf->end_chunk = 0;
+  sf->head_chunk = sf->tail_chunk = 0;
+
+  /* Add to free list */
+  sf->next = fss->free_fifos;
+  fss->free_fifos = sf;
+
+  fss->virtual_mem -= svm_fifo_size (f);
+
+  /*
+   *  Cleanup private state
+   */
 
   /* Remove from active list. Only rx fifos are tracked */
   if (f->flags & SVM_FIFO_F_LL_TRACKED)
@@ -785,28 +864,19 @@
       f->flags &= ~SVM_FIFO_F_LL_TRACKED;
     }
 
-  /* Free fifo chunks */
-  fsh_slice_collect_chunks (fsh, fss, f->start_chunk);
-
-  f->start_chunk = f->end_chunk = 0;
-  f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = 0;
-
-  /* not allocated on segment heap */
   svm_fifo_free_chunk_lookup (f);
   svm_fifo_free_ooo_data (f);
 
   if (CLIB_DEBUG)
     {
-      f->master_session_index = ~0;
+      sf->master_session_index = ~0;
       f->master_thread_index = ~0;
     }
 
-  fss->virtual_mem -= svm_fifo_size (f);
-
-  /* Add to free list */
-  f->next = fss->free_fifos;
+  f->ooo_enq = f->ooo_deq = 0;
   f->prev = 0;
-  fss->free_fifos = f;
+
+  fs_fifo_free (fs, f);
 
   fsh_active_fifos_update (fsh, -1);
 }
@@ -820,12 +890,12 @@
 
   ASSERT (f->refcnt == 1);
 
-  fss = fsh_slice_get (fs->h, f->slice_index);
+  fss = fsh_slice_get (fs->h, f->shr->slice_index);
   fss->virtual_mem -= svm_fifo_size (f);
   if (f->flags & SVM_FIFO_F_LL_TRACKED)
     fss_fifo_del_active_list (fss, f);
 
-  c = f->start_chunk;
+  c = f->shr->start_chunk;
   while (c)
     {
       fl_index = fs_freelist_for_size (c->length);
@@ -842,13 +912,13 @@
   svm_fifo_chunk_t *c;
   u32 fl_index;
 
-  f->slice_index = slice_index;
-  fss = fsh_slice_get (fs->h, f->slice_index);
+  f->shr->slice_index = slice_index;
+  fss = fsh_slice_get (fs->h, f->shr->slice_index);
   fss->virtual_mem += svm_fifo_size (f);
   if (f->flags & SVM_FIFO_F_LL_TRACKED)
     fss_fifo_add_active_list (fss, f);
 
-  c = f->start_chunk;
+  c = f->shr->start_chunk;
   while (c)
     {
       fl_index = fs_freelist_for_size (c->length);
@@ -966,7 +1036,7 @@
 static u32
 fs_slice_num_free_fifos (fifo_segment_slice_t * fss)
 {
-  svm_fifo_t *f;
+  svm_fifo_shared_t *f;
   u32 count = 0;
 
   f = fss->free_fifos;
diff --git a/src/svm/fifo_segment.h b/src/svm/fifo_segment.h
index 39c9454..006ffc4 100644
--- a/src/svm/fifo_segment.h
+++ b/src/svm/fifo_segment.h
@@ -69,6 +69,7 @@
   fifo_segment_header_t *h;	/**< fifo segment data */
   uword max_byte_index;
   u8 n_slices;			/**< number of fifo segment slices */
+  fifo_slice_private_t *slices; /**< private slice information */
 } fifo_segment_t;
 
 typedef struct
@@ -95,6 +96,7 @@
 int fifo_segment_attach (fifo_segment_main_t * sm,
 			 fifo_segment_create_args_t * a);
 void fifo_segment_delete (fifo_segment_main_t * sm, fifo_segment_t * fs);
+void fifo_segment_cleanup (fifo_segment_t *fs);
 fifo_segment_t *fifo_segment_get_segment (fifo_segment_main_t * sm,
 					  u32 fs_index);
 u32 fifo_segment_index (fifo_segment_main_t * sm, fifo_segment_t * fs);
@@ -112,6 +114,8 @@
 					     u32 slice_index,
 					     u32 data_bytes,
 					     fifo_segment_ftype_t ftype);
+svm_fifo_t *fifo_segment_alloc_fifo_w_shared (fifo_segment_t *fs,
+					      svm_fifo_shared_t *sf);
 
 /**
  * Free fifo allocated in fifo segment
diff --git a/src/svm/fifo_types.h b/src/svm/fifo_types.h
index e839e7e..434b2c3 100644
--- a/src/svm/fifo_types.h
+++ b/src/svm/fifo_types.h
@@ -59,59 +59,65 @@
   u32 action;
 } svm_fifo_trace_elem_t;
 
-typedef struct _svm_fifo
+typedef struct svm_fifo_shr_
 {
-  CLIB_CACHE_LINE_ALIGN_MARK (shared_first);
-  fifo_segment_header_t *fs_hdr;/**< fifo segment header for fifo */
+  CLIB_CACHE_LINE_ALIGN_MARK (shared);
   svm_fifo_chunk_t *start_chunk;/**< first chunk in fifo chunk list */
   svm_fifo_chunk_t *end_chunk;	/**< end chunk in fifo chunk list */
+  volatile u32 has_event;	/**< non-zero if deq event exists */
   u32 min_alloc;		/**< min chunk alloc if space available */
   u32 size;			/**< size of the fifo in bytes */
-  u8 flags;			/**< fifo flags */
-  u8 slice_index;		/**< segment slice for fifo */
-
-    CLIB_CACHE_LINE_ALIGN_MARK (shared_second);
-  volatile u32 has_event;	/**< non-zero if deq event exists */
   u32 master_session_index;	/**< session layer session index */
   u32 client_session_index;	/**< app session index */
-  u8 master_thread_index;	/**< session layer thread index */
-  u8 client_thread_index;	/**< app worker index */
-  i8 refcnt;			/**< reference count  */
-  u32 segment_manager;		/**< session layer segment manager index */
-  u32 segment_index;		/**< segment index in segment manager */
-  struct _svm_fifo *next;	/**< next in freelist/active chain */
-  struct _svm_fifo *prev;	/**< prev in active chain */
+  u8 slice_index;		/**< segment slice for fifo */
+  struct svm_fifo_shr_ *next;	/**< next in freelist/active chain */
 
-    CLIB_CACHE_LINE_ALIGN_MARK (consumer);
-  rb_tree_t ooo_deq_lookup;	/**< rbtree for ooo deq chunk lookup */
+  CLIB_CACHE_LINE_ALIGN_MARK (consumer);
   svm_fifo_chunk_t *head_chunk;	/**< tracks chunk where head lands */
-  svm_fifo_chunk_t *ooo_deq;	/**< last chunk used for ooo dequeue */
   u32 head;			/**< fifo head position/byte */
   volatile u32 want_deq_ntf;	/**< producer wants nudge */
   volatile u32 has_deq_ntf;
 
-    CLIB_CACHE_LINE_ALIGN_MARK (producer);
-  rb_tree_t ooo_enq_lookup;	/**< rbtree for ooo enq chunk lookup */
+  CLIB_CACHE_LINE_ALIGN_MARK (producer);
   u32 tail;			/**< fifo tail position/byte */
-  u32 ooos_list_head;		/**< Head of out-of-order linked-list */
   svm_fifo_chunk_t *tail_chunk;	/**< tracks chunk where tail lands */
-  svm_fifo_chunk_t *ooo_enq;	/**< last chunk used for ooo enqueue */
-  ooo_segment_t *ooo_segments;	/**< Pool of ooo segments */
-  u32 ooos_newest;		/**< Last segment to have been updated */
   volatile u8 n_subscribers;	/**< Number of subscribers for io events */
   u8 subscribers[SVM_FIFO_MAX_EVT_SUBSCRIBERS];
+} svm_fifo_shared_t;
+
+typedef struct _svm_fifo
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline);
+  svm_fifo_shared_t *shr;	 /**< shared fifo in fifo segment memory */
+  fifo_segment_header_t *fs_hdr; /**< fifo segment header for fifo */
+  rb_tree_t ooo_enq_lookup;	 /**< rbtree for ooo enq chunk lookup */
+  rb_tree_t ooo_deq_lookup;	 /**< rbtree for ooo deq chunk lookup */
+  svm_fifo_chunk_t *ooo_deq;	 /**< last chunk used for ooo dequeue */
+  svm_fifo_chunk_t *ooo_enq;	/**< last chunk used for ooo enqueue */
+  ooo_segment_t *ooo_segments;	/**< Pool of ooo segments */
+  u32 ooos_list_head;		/**< Head of out-of-order linked-list */
+  u32 ooos_newest;		/**< Last segment to have been updated */
+
+  u8 flags;		  /**< fifo flags */
+  u8 master_thread_index; /**< session layer thread index */
+  u8 client_thread_index; /**< app worker index */
+  i8 refcnt;		  /**< reference count  */
+  u32 segment_manager;	  /**< session layer segment manager index */
+  u32 segment_index;	  /**< segment index in segment manager */
+
+  struct _svm_fifo *next; /**< next in active chain */
+  struct _svm_fifo *prev; /**< prev in active chain */
 
 #if SVM_FIFO_TRACE
   svm_fifo_trace_elem_t *trace;
 #endif
-
 } svm_fifo_t;
 
 typedef struct fifo_segment_slice_
 {
   svm_fifo_chunk_t *free_chunks[FS_CHUNK_VEC_LEN]; /**< Free chunks by size */
   svm_fifo_t *fifos;			/**< Linked list of active RX fifos */
-  svm_fifo_t *free_fifos;		/**< Freelists by fifo size  */
+  svm_fifo_shared_t *free_fifos;	/**< Freelists of fifo shared hdrs  */
   uword n_fl_chunk_bytes;		/**< Chunk bytes on freelist */
   uword virtual_mem;			/**< Slice sum of all fifo sizes */
   u32 num_chunks[FS_CHUNK_VEC_LEN];	/**< Allocated chunks by chunk size */
@@ -120,6 +126,12 @@
   u32 chunk_lock;
 } fifo_segment_slice_t;
 
+typedef struct fifo_slice_private_
+{
+  clib_mem_bulk_handle_t fifos; /**< Bulk fifo allocator */
+  uword virtual_mem;		/**< Slice sum of all fifo sizes */
+} fifo_slice_private_t;
+
 struct fifo_segment_header_
 {
   uword n_cached_bytes;			/**< Cached bytes */
diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c
index 0c08dba..2f910e0 100644
--- a/src/svm/svm_fifo.c
+++ b/src/svm/svm_fifo.c
@@ -348,7 +348,7 @@
 	}
     }
 
-  ASSERT (bytes <= f->size);
+  ASSERT (bytes <= f->shr->size);
   return bytes;
 }
 
@@ -372,23 +372,23 @@
   svm_fifo_chunk_t *c, *prev;
   u32 min_alloc;
 
-  f->size = size;
+  f->shr->size = size;
   f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
   f->segment_index = SVM_FIFO_INVALID_INDEX;
   f->refcnt = 1;
-  f->head = f->tail = f->flags = 0;
-  f->head_chunk = f->tail_chunk = f->start_chunk;
+  f->shr->head = f->shr->tail = f->flags = 0;
+  f->shr->head_chunk = f->shr->tail_chunk = f->shr->start_chunk;
   f->ooo_deq = f->ooo_enq = 0;
 
   min_alloc = size > 32 << 10 ? size >> 3 : 4096;
   min_alloc = clib_min (min_alloc, 64 << 10);
-  f->min_alloc = min_alloc;
+  f->shr->min_alloc = min_alloc;
 
   /*
    * Initialize chunks
    */
-  f->start_chunk->start_byte = 0;
-  prev = f->start_chunk;
+  f->shr->start_chunk->start_byte = 0;
+  prev = f->shr->start_chunk;
   prev->enq_rb_index = prev->deq_rb_index = RBTREE_TNIL_INDEX;
   c = prev->next;
 
@@ -447,7 +447,7 @@
   c->length = data_size_in_bytes;
   c->enq_rb_index = RBTREE_TNIL_INDEX;
   c->deq_rb_index = RBTREE_TNIL_INDEX;
-  f->start_chunk = f->end_chunk = c;
+  f->shr->start_chunk = f->shr->end_chunk = c;
 
   return f;
 }
@@ -486,7 +486,7 @@
 {
   svm_fifo_chunk_t *c;
 
-  c = f->start_chunk;
+  c = f->shr->start_chunk;
   while (c && !f_chunk_includes_pos (c, pos))
     c = c->next;
 
@@ -513,16 +513,17 @@
   u32 head, tail, end_chunk;
 
   f_load_head_tail_cons (f, &head, &tail);
-  ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head));
+  ASSERT (!f->shr->head_chunk ||
+	  f_chunk_includes_pos (f->shr->head_chunk, head));
 
-  if (!f->head_chunk)
+  if (!f->shr->head_chunk)
     {
-      f->head_chunk = svm_fifo_find_chunk (f, head);
-      if (PREDICT_FALSE (!f->head_chunk))
+      f->shr->head_chunk = svm_fifo_find_chunk (f, head);
+      if (PREDICT_FALSE (!f->shr->head_chunk))
 	return 0;
     }
 
-  end_chunk = f_chunk_end (f->head_chunk);
+  end_chunk = f_chunk_end (f->shr->head_chunk);
 
   return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
 }
@@ -533,9 +534,10 @@
   u32 head, tail;
 
   f_load_head_tail_prod (f, &head, &tail);
-  ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail));
+  ASSERT (!f->shr->tail_chunk ||
+	  f_chunk_includes_pos (f->shr->tail_chunk, tail));
 
-  return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
+  return f->shr->tail_chunk ? f_chunk_end (f->shr->tail_chunk) - tail : 0;
 }
 
 static rb_node_t *
@@ -605,13 +607,13 @@
   /* Use linear search if rbtree is not initialized */
   if (PREDICT_FALSE (!rb_tree_is_init (rt)))
     {
-      f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos);
+      f->ooo_enq = svm_fifo_find_next_chunk (f, f->shr->tail_chunk, start_pos);
       return;
     }
 
   if (rt->root == RBTREE_TNIL_INDEX)
     {
-      c = f->tail_chunk;
+      c = f->shr->tail_chunk;
       ASSERT (c->enq_rb_index == RBTREE_TNIL_INDEX);
       c->enq_rb_index = rb_tree_add_custom (rt, c->start_byte,
 					    pointer_to_uword (c), f_pos_lt);
@@ -660,7 +662,7 @@
 
   if (rt->root == RBTREE_TNIL_INDEX)
     {
-      c = f->start_chunk;
+      c = f->shr->start_chunk;
       ASSERT (c->deq_rb_index == RBTREE_TNIL_INDEX);
       c->deq_rb_index = rb_tree_add_custom (rt, c->start_byte,
 					    pointer_to_uword (c), f_pos_lt);
@@ -778,14 +780,14 @@
   u32 head, tail, head_idx;
   svm_fifo_chunk_t *c;
 
-  ASSERT (len <= f->size);
+  ASSERT (len <= f->shr->size);
 
   f_load_head_tail_cons (f, &head, &tail);
 
-  if (!f->head_chunk)
-    f->head_chunk = svm_fifo_find_chunk (f, head);
+  if (!f->shr->head_chunk)
+    f->shr->head_chunk = svm_fifo_find_chunk (f, head);
 
-  c = f->head_chunk;
+  c = f->shr->head_chunk;
   head_idx = head - c->start_byte;
   n_chunk = c->length - head_idx;
   if (len <= n_chunk)
@@ -804,17 +806,17 @@
   svm_fifo_chunk_t *c, *cur, *prev;
   u32 alloc_size, free_alloced;
 
-  free_alloced = f_chunk_end (f->end_chunk) - tail;
+  free_alloced = f_chunk_end (f->shr->end_chunk) - tail;
 
-  alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
+  alloc_size = clib_min (f->shr->min_alloc, f->shr->size - (tail - head));
   alloc_size = clib_max (alloc_size, len - free_alloced);
 
-  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
+  c = fsh_alloc_chunk (f->fs_hdr, f->shr->slice_index, alloc_size);
   if (PREDICT_FALSE (!c))
     return -1;
 
   cur = c;
-  prev = f->end_chunk;
+  prev = f->shr->end_chunk;
 
   while (cur)
     {
@@ -827,11 +829,11 @@
     }
 
   prev->next = 0;
-  f->end_chunk->next = c;
-  f->end_chunk = prev;
+  f->shr->end_chunk->next = c;
+  f->shr->end_chunk = prev;
 
-  if (!f->tail_chunk)
-    f->tail_chunk = c;
+  if (!f->shr->tail_chunk)
+    f->shr->tail_chunk = c;
 
   return 0;
 }
@@ -855,19 +857,20 @@
   /* number of bytes we're going to copy */
   len = clib_min (free_count, len);
 
-  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
+  if (f_pos_gt (tail + len, f_chunk_end (f->shr->end_chunk)))
     {
       if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
 	{
-	  len = f_chunk_end (f->end_chunk) - tail;
+	  len = f_chunk_end (f->shr->end_chunk) - tail;
 	  if (!len)
 	    return SVM_FIFO_EGROW;
 	}
     }
 
-  old_tail_c = f->tail_chunk;
+  old_tail_c = f->shr->tail_chunk;
 
-  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk);
+  svm_fifo_copy_to_chunk (f, f->shr->tail_chunk, tail, src, len,
+			  &f->shr->tail_chunk);
   tail = tail + len;
 
   svm_fifo_trace_add (f, head, len, 2);
@@ -877,12 +880,12 @@
     {
       len += ooo_segment_try_collect (f, len, &tail);
       /* Tail chunk might've changed even if nothing was collected */
-      f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
+      f->shr->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
       f->ooo_enq = 0;
     }
 
   /* store-rel: producer owned index (paired with load-acq in consumer) */
-  clib_atomic_store_rel_n (&f->tail, tail);
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
 
   return len;
 }
@@ -911,7 +914,7 @@
 
   enq_pos = tail + offset;
 
-  if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
+  if (f_pos_gt (enq_pos + len, f_chunk_end (f->shr->end_chunk)))
     {
       if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
 	return SVM_FIFO_EGROW;
@@ -938,21 +941,23 @@
 
   ASSERT (len <= svm_fifo_max_enqueue_prod (f));
   /* load-relaxed: producer owned index */
-  tail = f->tail;
+  tail = f->shr->tail;
   tail = tail + len;
 
   if (rb_tree_is_init (&f->ooo_enq_lookup))
     {
-      f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
+      f->shr->tail_chunk =
+	f_lookup_clear_enq_chunks (f, f->shr->tail_chunk, tail);
       f->ooo_enq = 0;
     }
   else
     {
-      f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
+      f->shr->tail_chunk =
+	svm_fifo_find_next_chunk (f, f->shr->tail_chunk, tail);
     }
 
   /* store-rel: producer owned index (paired with load-acq in consumer) */
-  clib_atomic_store_rel_n (&f->tail, tail);
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
 }
 
 int
@@ -975,14 +980,14 @@
   for (i = 0; i < n_segs; i++)
     len += segs[i].len;
 
-  old_tail_c = f->tail_chunk;
+  old_tail_c = f->shr->tail_chunk;
 
   if (!allow_partial)
     {
       if (PREDICT_FALSE (free_count < len))
 	return SVM_FIFO_EFULL;
 
-      if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
+      if (f_pos_gt (tail + len, f_chunk_end (f->shr->end_chunk)))
 	{
 	  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
 	    return SVM_FIFO_EGROW;
@@ -990,8 +995,8 @@
 
       for (i = 0; i < n_segs; i++)
 	{
-	  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, segs[i].data,
-				  segs[i].len, &f->tail_chunk);
+	  svm_fifo_copy_to_chunk (f, f->shr->tail_chunk, tail, segs[i].data,
+				  segs[i].len, &f->shr->tail_chunk);
 	  tail += segs[i].len;
 	}
     }
@@ -999,11 +1004,11 @@
     {
       len = clib_min (free_count, len);
 
-      if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
+      if (f_pos_gt (tail + len, f_chunk_end (f->shr->end_chunk)))
 	{
 	  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
 	    {
-	      len = f_chunk_end (f->end_chunk) - tail;
+	      len = f_chunk_end (f->shr->end_chunk) - tail;
 	      if (!len)
 		return SVM_FIFO_EGROW;
 	    }
@@ -1013,8 +1018,8 @@
       while (len)
 	{
 	  u32 to_copy = clib_min (segs[i].len, len);
-	  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, segs[i].data,
-				  to_copy, &f->tail_chunk);
+	  svm_fifo_copy_to_chunk (f, f->shr->tail_chunk, tail, segs[i].data,
+				  to_copy, &f->shr->tail_chunk);
 	  len -= to_copy;
 	  tail += to_copy;
 	  i++;
@@ -1026,12 +1031,12 @@
     {
       len += ooo_segment_try_collect (f, len, &tail);
       /* Tail chunk might've changed even if nothing was collected */
-      f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
+      f->shr->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
       f->ooo_enq = 0;
     }
 
   /* store-rel: producer owned index (paired with load-acq in consumer) */
-  clib_atomic_store_rel_n (&f->tail, tail);
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
 
   return len;
 }
@@ -1043,12 +1048,12 @@
   rb_tree_t *rt;
   rb_node_t *n;
 
-  ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
+  ASSERT (!f_chunk_includes_pos (f->shr->start_chunk, end_pos));
 
   if (maybe_ooo)
     rt = &f->ooo_deq_lookup;
 
-  c = f->start_chunk;
+  c = f->shr->start_chunk;
 
   do
     {
@@ -1082,8 +1087,8 @@
     return 0;
 
   prev->next = 0;
-  start = f->start_chunk;
-  f->start_chunk = c;
+  start = f->shr->start_chunk;
+  f->shr->start_chunk = c;
 
   return start;
 }
@@ -1103,22 +1108,23 @@
 
   len = clib_min (cursize, len);
 
-  if (!f->head_chunk)
-    f->head_chunk = svm_fifo_find_chunk (f, head);
+  if (!f->shr->head_chunk)
+    f->shr->head_chunk = svm_fifo_find_chunk (f, head);
 
-  svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
+  svm_fifo_copy_from_chunk (f, f->shr->head_chunk, head, dst, len,
+			    &f->shr->head_chunk);
   head = head + len;
 
   /* In order dequeues are not supported in combination with ooo peeking.
    * Use svm_fifo_dequeue_drop instead. */
   ASSERT (rb_tree_n_nodes (&f->ooo_deq_lookup) <= 1);
 
-  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
-    fsh_collect_chunks (f->fs_hdr, f->slice_index,
+  if (f_pos_geq (head, f_chunk_end (f->shr->start_chunk)))
+    fsh_collect_chunks (f->fs_hdr, f->shr->slice_index,
 			f_unlink_chunks (f, head, 0));
 
   /* store-rel: consumer owned index (paired with load-acq in producer) */
-  clib_atomic_store_rel_n (&f->head, head);
+  clib_atomic_store_rel_n (&f->shr->head, head);
 
   return len;
 }
@@ -1167,16 +1173,17 @@
   /* move head */
   head = head + total_drop_bytes;
 
-  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
+  if (f_pos_geq (head, f_chunk_end (f->shr->start_chunk)))
     {
-      fsh_collect_chunks (f->fs_hdr, f->slice_index,
+      fsh_collect_chunks (f->fs_hdr, f->shr->slice_index,
 			  f_unlink_chunks (f, head, 1));
-      f->head_chunk =
-	f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
+      f->shr->head_chunk = f_chunk_includes_pos (f->shr->start_chunk, head) ?
+			     f->shr->start_chunk :
+			     0;
     }
 
   /* store-rel: consumer owned index (paired with load-acq in producer) */
-  clib_atomic_store_rel_n (&f->head, head);
+  clib_atomic_store_rel_n (&f->shr->head, head);
 
   return total_drop_bytes;
 }
@@ -1192,17 +1199,17 @@
 
   f_load_head_tail_all_acq (f, &head, &tail);
 
-  if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head))
-    f->head_chunk = svm_fifo_find_chunk (f, head);
+  if (!f->shr->head_chunk || !f_chunk_includes_pos (f->shr->head_chunk, head))
+    f->shr->head_chunk = svm_fifo_find_chunk (f, head);
 
-  f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail);
+  f->shr->head_chunk = f_lookup_clear_deq_chunks (f, f->shr->head_chunk, tail);
 
-  if (f_pos_geq (tail, f_chunk_end (f->start_chunk)))
-    fsh_collect_chunks (f->fs_hdr, f->slice_index,
+  if (f_pos_geq (tail, f_chunk_end (f->shr->start_chunk)))
+    fsh_collect_chunks (f->fs_hdr, f->shr->slice_index,
 			f_unlink_chunks (f, tail, 0));
 
   /* store-rel: consumer owned index (paired with load-acq in producer) */
-  clib_atomic_store_rel_n (&f->head, tail);
+  clib_atomic_store_rel_n (&f->shr->head, tail);
 }
 
 int
@@ -1212,10 +1219,10 @@
 
   f_load_head_tail_prod (f, &head, &tail);
 
-  if (f_chunk_end (f->end_chunk) - head >= f->size)
+  if (f_chunk_end (f->shr->end_chunk) - head >= f->shr->size)
     return 0;
 
-  if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
+  if (f_try_chunk_alloc (f, head, tail, f->shr->size - (tail - head)))
     return SVM_FIFO_EGROW;
 
   return 0;
@@ -1233,12 +1240,12 @@
   if (f_free_count (f, head, tail) < len)
     return SVM_FIFO_EFULL;
 
-  n_avail = f_chunk_end (f->end_chunk) - tail;
+  n_avail = f_chunk_end (f->shr->end_chunk) - tail;
 
   if (n_avail < len && f_try_chunk_alloc (f, head, tail, len))
     return SVM_FIFO_EGROW;
 
-  c = f->tail_chunk;
+  c = f->shr->tail_chunk;
   head_pos = (tail - c->start_byte);
   fs[0].data = c->data + head_pos;
   fs[0].len = clib_min (c->length - head_pos, len);
@@ -1279,10 +1286,10 @@
   to_read = clib_min (cursize - offset, max_bytes);
   start = head + offset;
 
-  if (!f->head_chunk)
-    f->head_chunk = svm_fifo_find_chunk (f, head);
+  if (!f->shr->head_chunk)
+    f->shr->head_chunk = svm_fifo_find_chunk (f, head);
 
-  c = f->head_chunk;
+  c = f->shr->head_chunk;
 
   while (!f_chunk_includes_pos (c, start))
     c = c->next;
@@ -1320,11 +1327,12 @@
   /* Support only single chunk clones for now */
   ASSERT (svm_fifo_n_chunks (sf) == 1);
 
-  clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size);
+  clib_memcpy_fast (df->shr->head_chunk->data, sf->shr->head_chunk->data,
+		    sf->shr->size);
 
   f_load_head_tail_all_acq (sf, &head, &tail);
-  clib_atomic_store_rel_n (&df->head, head);
-  clib_atomic_store_rel_n (&df->tail, tail);
+  clib_atomic_store_rel_n (&df->shr->head, head);
+  clib_atomic_store_rel_n (&df->shr->tail, tail);
 }
 
 u32
@@ -1347,23 +1355,23 @@
 {
   svm_fifo_chunk_t *c;
 
-  clib_atomic_store_rel_n (&f->head, head);
-  clib_atomic_store_rel_n (&f->tail, tail);
+  clib_atomic_store_rel_n (&f->shr->head, head);
+  clib_atomic_store_rel_n (&f->shr->tail, tail);
 
   c = svm_fifo_find_chunk (f, head);
   ASSERT (c != 0);
-  f->head_chunk = f->ooo_deq = c;
+  f->shr->head_chunk = f->ooo_deq = c;
   c = svm_fifo_find_chunk (f, tail);
   ASSERT (c != 0);
-  f->tail_chunk = f->ooo_enq = c;
+  f->shr->tail_chunk = f->ooo_enq = c;
 }
 
 void
 svm_fifo_add_subscriber (svm_fifo_t * f, u8 subscriber)
 {
-  if (f->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
+  if (f->shr->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
     return;
-  f->subscribers[f->n_subscribers++] = subscriber;
+  f->shr->subscribers[f->shr->n_subscribers++] = subscriber;
 }
 
 void
@@ -1371,12 +1379,12 @@
 {
   int i;
 
-  for (i = 0; i < f->n_subscribers; i++)
+  for (i = 0; i < f->shr->n_subscribers; i++)
     {
-      if (f->subscribers[i] != subscriber)
+      if (f->shr->subscribers[i] != subscriber)
 	continue;
-      f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
-      f->n_subscribers--;
+      f->shr->subscribers[i] = f->shr->subscribers[f->shr->n_subscribers - 1];
+      f->shr->n_subscribers--;
       break;
     }
 }
@@ -1386,17 +1394,20 @@
 {
   svm_fifo_chunk_t *tmp;
 
-  if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head))
+  if (f->shr->head_chunk &&
+      !f_chunk_includes_pos (f->shr->head_chunk, f->shr->head))
     return 0;
-  if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail))
+  if (f->shr->tail_chunk &&
+      !f_chunk_includes_pos (f->shr->tail_chunk, f->shr->tail))
     return 0;
   if (f->ooo_deq)
     {
       if (rb_tree_is_init (&f->ooo_deq_lookup))
 	{
-	  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
-	      || f_pos_gt (f->ooo_deq->start_byte,
-			   f_chunk_end (f->end_chunk)))
+	  if (f_pos_lt (f->ooo_deq->start_byte,
+			f->shr->start_chunk->start_byte) ||
+	      f_pos_gt (f->ooo_deq->start_byte,
+			f_chunk_end (f->shr->end_chunk)))
 	    return 0;
 
 	  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup,
@@ -1411,9 +1422,10 @@
     {
       if (rb_tree_is_init (&f->ooo_enq_lookup))
 	{
-	  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
-	      || f_pos_gt (f->ooo_enq->start_byte,
-			   f_chunk_end (f->end_chunk)))
+	  if (f_pos_lt (f->ooo_enq->start_byte,
+			f->shr->start_chunk->start_byte) ||
+	      f_pos_gt (f->ooo_enq->start_byte,
+			f_chunk_end (f->shr->end_chunk)))
 	    return 0;
 
 	  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup,
@@ -1421,19 +1433,19 @@
 	}
       else
 	{
-	  tmp = svm_fifo_find_next_chunk (f, f->tail_chunk,
+	  tmp = svm_fifo_find_next_chunk (f, f->shr->tail_chunk,
 					  f->ooo_enq->start_byte);
 	}
       if (tmp != f->ooo_enq)
 	return 0;
     }
 
-  if (f->start_chunk->next)
+  if (f->shr->start_chunk->next)
     {
       svm_fifo_chunk_t *c, *prev = 0, *tmp;
       u32 chunks_bytes = 0;
 
-      c = f->start_chunk;
+      c = f->shr->start_chunk;
       do
 	{
 	  tmp = svm_fifo_find_chunk (f, c->start_byte);
@@ -1467,7 +1479,7 @@
 	}
       while (c);
 
-      if (chunks_bytes < f->tail - f->head)
+      if (chunks_bytes < f->shr->tail - f->shr->head)
 	return 0;
     }
 
@@ -1480,7 +1492,7 @@
   svm_fifo_chunk_t *c;
   int n_chunks = 0;
 
-  c = f->start_chunk;
+  c = f->shr->start_chunk;
   while (c)
     {
       n_chunks++;
@@ -1544,10 +1556,10 @@
   trace_len = 0;
 #endif
 
-  placeholder_fifo = svm_fifo_alloc (f->size);
-  svm_fifo_init (f, f->size);
-  clib_memset (f->head_chunk->data, 0xFF, f->size);
-  vec_validate (data, f->size);
+  placeholder_fifo = svm_fifo_alloc (f->shr->size);
+  svm_fifo_init (f, f->shr->size);
+  clib_memset (f->shr->head_chunk->data, 0xFF, f->shr->size);
+  vec_validate (data, f->shr->size);
   for (i = 0; i < vec_len (data); i++)
     data[i] = i;
 
@@ -1614,14 +1626,15 @@
 
   indent = format_get_indent (s);
   s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
-	      svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc);
+	      svm_fifo_max_dequeue (f), f->shr->size, f->shr->has_event,
+	      f->shr->min_alloc);
   s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space,
-	      indent, f->head, f->tail, f->segment_manager);
+	      indent, f->shr->head, f->shr->tail, f->segment_manager);
 
   if (verbose > 1)
     s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
-		format_white_space, indent, f->master_session_index,
-		f->master_thread_index, f->client_session_index,
+		format_white_space, indent, f->shr->master_session_index,
+		f->master_thread_index, f->shr->client_session_index,
 		f->client_thread_index);
 
   if (verbose)
diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h
index 5656e63..5845d70 100644
--- a/src/svm/svm_fifo.h
+++ b/src/svm/svm_fifo.h
@@ -80,9 +80,9 @@
 f_load_head_tail_cons (svm_fifo_t * f, u32 * head, u32 * tail)
 {
   /* load-relaxed: consumer owned index */
-  *head = f->head;
+  *head = f->shr->head;
   /* load-acq: consumer foreign index (paired with store-rel in producer) */
-  *tail = clib_atomic_load_acq_n (&f->tail);
+  *tail = clib_atomic_load_acq_n (&f->shr->tail);
 }
 
 /** Load head and tail optimized for producer
@@ -93,9 +93,9 @@
 f_load_head_tail_prod (svm_fifo_t * f, u32 * head, u32 * tail)
 {
   /* load relaxed: producer owned index */
-  *tail = f->tail;
+  *tail = f->shr->tail;
   /* load-acq: producer foreign index (paired with store-rel in consumer) */
-  *head = clib_atomic_load_acq_n (&f->head);
+  *head = clib_atomic_load_acq_n (&f->shr->head);
 }
 
 /**
@@ -107,9 +107,9 @@
 f_load_head_tail_all_acq (svm_fifo_t * f, u32 * head, u32 * tail)
 {
   /* load-acq : consumer foreign index (paired with store-rel) */
-  *tail = clib_atomic_load_acq_n (&f->tail);
+  *tail = clib_atomic_load_acq_n (&f->shr->tail);
   /* load-acq : producer foriegn index (paired with store-rel) */
-  *head = clib_atomic_load_acq_n (&f->head);
+  *head = clib_atomic_load_acq_n (&f->shr->head);
 }
 
 /**
@@ -131,7 +131,7 @@
 static inline u32
 f_free_count (svm_fifo_t * f, u32 head, u32 tail)
 {
-  return (f->size - f_cursize (f, head, tail));
+  return (f->shr->size - f_cursize (f, head, tail));
 }
 
 always_inline u32
@@ -487,7 +487,7 @@
 static inline int
 svm_fifo_is_full_prod (svm_fifo_t * f)
 {
-  return (svm_fifo_max_dequeue_prod (f) == f->size);
+  return (svm_fifo_max_dequeue_prod (f) == f->shr->size);
 }
 
 /* Check if fifo is full.
@@ -499,7 +499,7 @@
 static inline int
 svm_fifo_is_full (svm_fifo_t * f)
 {
-  return (svm_fifo_max_dequeue (f) == f->size);
+  return (svm_fifo_max_dequeue (f) == f->shr->size);
 }
 
 /**
@@ -606,7 +606,7 @@
 static inline svm_fifo_chunk_t *
 svm_fifo_head_chunk (svm_fifo_t * f)
 {
-  return f->head_chunk;
+  return f->shr->head_chunk;
 }
 
 /**
@@ -618,10 +618,11 @@
 static inline u8 *
 svm_fifo_head (svm_fifo_t * f)
 {
-  if (!f->head_chunk)
+  if (!f->shr->head_chunk)
     return 0;
   /* load-relaxed: consumer owned index */
-  return (f->head_chunk->data + (f->head - f->head_chunk->start_byte));
+  return (f->shr->head_chunk->data +
+	  (f->shr->head - f->shr->head_chunk->start_byte));
 }
 
 /**
@@ -633,7 +634,7 @@
 static inline svm_fifo_chunk_t *
 svm_fifo_tail_chunk (svm_fifo_t * f)
 {
-  return f->tail_chunk;
+  return f->shr->tail_chunk;
 }
 
 /**
@@ -646,7 +647,8 @@
 svm_fifo_tail (svm_fifo_t * f)
 {
   /* load-relaxed: producer owned index */
-  return (f->tail_chunk->data + (f->tail - f->tail_chunk->start_byte));
+  return (f->shr->tail_chunk->data +
+	  (f->shr->tail - f->shr->tail_chunk->start_byte));
 }
 
 /**
@@ -658,7 +660,7 @@
 static inline u8
 svm_fifo_n_subscribers (svm_fifo_t * f)
 {
-  return f->n_subscribers;
+  return f->shr->n_subscribers;
 }
 
 /**
@@ -692,7 +694,7 @@
 {
   u32 tail;
   /* load-relaxed: producer owned index */
-  tail = f->tail;
+  tail = f->shr->tail;
 
   return (s->start - tail);
 }
@@ -706,7 +708,7 @@
 static inline u32
 svm_fifo_size (svm_fifo_t * f)
 {
-  return f->size;
+  return f->shr->size;
 }
 
 static inline void
@@ -714,8 +716,9 @@
 {
   if (size > (1 << f->fs_hdr->max_log2_fifo_size))
     return;
-  fsh_virtual_mem_update (f->fs_hdr, f->slice_index, (int) f->size - size);
-  f->size = size;
+  fsh_virtual_mem_update (f->fs_hdr, f->shr->slice_index,
+			  (int) f->shr->size - size);
+  f->shr->size = size;
 }
 
 /**
@@ -727,7 +730,7 @@
 static inline int
 svm_fifo_has_event (svm_fifo_t * f)
 {
-  return f->has_event;
+  return f->shr->has_event;
 }
 
 /**
@@ -741,7 +744,7 @@
 always_inline u8
 svm_fifo_set_event (svm_fifo_t * f)
 {
-  return !clib_atomic_swap_rel_n (&f->has_event, 1);
+  return !clib_atomic_swap_rel_n (&f->shr->has_event, 1);
 }
 
 /**
@@ -754,7 +757,7 @@
 always_inline void
 svm_fifo_unset_event (svm_fifo_t * f)
 {
-  clib_atomic_swap_acq_n (&f->has_event, 0);
+  clib_atomic_swap_acq_n (&f->shr->has_event, 0);
 }
 
 /**
@@ -768,7 +771,7 @@
 static inline void
 svm_fifo_add_want_deq_ntf (svm_fifo_t * f, u8 ntf_type)
 {
-  f->want_deq_ntf |= ntf_type;
+  f->shr->want_deq_ntf |= ntf_type;
 }
 
 /**
@@ -782,7 +785,7 @@
 static inline void
 svm_fifo_del_want_deq_ntf (svm_fifo_t * f, u8 ntf_type)
 {
-  f->want_deq_ntf &= ~ntf_type;
+  f->shr->want_deq_ntf &= ~ntf_type;
 }
 
 /**
@@ -800,7 +803,8 @@
 svm_fifo_clear_deq_ntf (svm_fifo_t * f)
 {
   /* Set the flag if want_notif_if_full was the only ntf requested */
-  f->has_deq_ntf = f->want_deq_ntf == SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL;
+  f->shr->has_deq_ntf =
+    f->shr->want_deq_ntf == SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL;
   svm_fifo_del_want_deq_ntf (f, SVM_FIFO_WANT_DEQ_NOTIF);
 }
 
@@ -816,7 +820,7 @@
 static inline void
 svm_fifo_reset_has_deq_ntf (svm_fifo_t * f)
 {
-  f->has_deq_ntf = 0;
+  f->shr->has_deq_ntf = 0;
 }
 
 /**
@@ -832,7 +836,7 @@
 static inline u8
 svm_fifo_needs_deq_ntf (svm_fifo_t * f, u32 n_last_deq)
 {
-  u8 want_ntf = f->want_deq_ntf;
+  u8 want_ntf = f->shr->want_deq_ntf;
 
   if (PREDICT_TRUE (want_ntf == SVM_FIFO_NO_DEQ_NOTIF))
     return 0;
@@ -841,13 +845,14 @@
   if (want_ntf & SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL)
     {
       u32 max_deq = svm_fifo_max_dequeue_cons (f);
-      u32 size = f->size;
-      if (!f->has_deq_ntf && max_deq < size && max_deq + n_last_deq >= size)
+      u32 size = f->shr->size;
+      if (!f->shr->has_deq_ntf && max_deq < size &&
+	  max_deq + n_last_deq >= size)
 	return 1;
     }
   if (want_ntf & SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY)
     {
-      if (!f->has_deq_ntf && svm_fifo_is_empty (f))
+      if (!f->shr->has_deq_ntf && svm_fifo_is_empty (f))
 	return 1;
     }
   return 0;