32/64 shmem bihash interoperability

Move the binary api segment above 4gb

Change-Id: I40e8aa7a97722a32397f5a538b5ff8344c50d408
Signed-off-by: Dave Barach <dave@barachs.net>
diff --git a/src/svm/svm.c b/src/svm/svm.c
index aa84a2f..681ac4a 100644
--- a/src/svm/svm.c
+++ b/src/svm/svm.c
@@ -92,7 +92,7 @@
 #endif
 
   /* default value */
-  return 0x30000000;
+  return 0x130000000ULL;
 }
 
 static void
diff --git a/src/vnet/fib/ip6_fib.c b/src/vnet/fib/ip6_fib.c
index da2f1ea..22b4757 100644
--- a/src/vnet/fib/ip6_fib.c
+++ b/src/vnet/fib/ip6_fib.c
@@ -571,12 +571,13 @@
     uword bytes_inuse;
 
     bytes_inuse = 
-        ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash.alloc_arena_next
-        - ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash.alloc_arena;
+        alloc_arena_next 
+        (&(ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash))
+        - alloc_arena (&(ip6_main.ip6_table[IP6_FIB_TABLE_NON_FWDING].ip6_hash));
 
     bytes_inuse += 
-        ip6_main.ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash.alloc_arena_next
-        - ip6_main.ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash.alloc_arena;
+        alloc_arena_next(&(ip6_main.ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash))
+        - alloc_arena(&(ip6_main.ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash));
 
     s = format(s, "%=30s %=6d %=8ld\n",
                "IPv6 unicast",
diff --git a/src/vppinfra.am b/src/vppinfra.am
index 57f7a1f..1d8e270 100644
--- a/src/vppinfra.am
+++ b/src/vppinfra.am
@@ -117,7 +117,7 @@
 test_vec_CPPFLAGS =	$(AM_CPPFLAGS) -DCLIB_DEBUG
 test_zvec_CPPFLAGS =	$(AM_CPPFLAGS) -DCLIB_DEBUG
 
-test_bihash_template_LDADD =	libvppinfra.la
+test_bihash_template_LDADD =	libvppinfra.la -lpthread
 test_bihash_vec88_LDADD =	libvppinfra.la
 test_cuckoo_template_LDADD =	libvppinfra.la
 test_cuckoo_bihash_LDADD =	libvppinfra.la
diff --git a/src/vppinfra/bihash_16_8_32.h b/src/vppinfra/bihash_16_8_32.h
new file mode 100644
index 0000000..d30625d
--- /dev/null
+++ b/src/vppinfra/bihash_16_8_32.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _16_8_32
+#define BIHASH_KVP_PER_PAGE 4
+
+#define BIHASH_32_64_SVM 1
+
+#ifndef __included_bihash_16_8_32_h__
+#define __included_bihash_16_8_32_h__
+
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+#include <vppinfra/crc32.h>
+
+typedef struct
+{
+  u64 key[2];
+  u64 value;
+} clib_bihash_kv_16_8_32_t;
+
+static inline int
+clib_bihash_is_free_16_8_32 (clib_bihash_kv_16_8_32_t * v)
+{
+  /* Free values are memset to 0xff, check a bit... */
+  if (v->key[0] == ~0ULL && v->value == ~0ULL)
+    return 1;
+  return 0;
+}
+
+static inline u64
+clib_bihash_hash_16_8_32 (clib_bihash_kv_16_8_32_t * v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+  return clib_crc32c ((u8 *) v->key, 16);
+#else
+  u64 tmp = v->key[0] ^ v->key[1];
+  return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_16_8_32 (u8 * s, va_list * args)
+{
+  clib_bihash_kv_16_8_32_t *v = va_arg (*args, clib_bihash_kv_16_8_32_t *);
+
+  s = format (s, "key %llu %llu value %llu", v->key[0], v->key[1], v->value);
+  return s;
+}
+
+static inline int
+clib_bihash_key_compare_16_8_32 (u64 * a, u64 * b)
+{
+#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
+  u64x2 v;
+  v = u64x2_load_unaligned (a) ^ u64x2_load_unaligned (b);
+  return u64x2_is_all_zero (v);
+#else
+  return ((a[0] ^ b[0]) | (a[1] ^ b[1])) == 0;
+#endif
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_16_8_32_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_template.c b/src/vppinfra/bihash_template.c
index e13ceb7..fa92c8b 100644
--- a/src/vppinfra/bihash_template.c
+++ b/src/vppinfra/bihash_template.c
@@ -23,10 +23,10 @@
   nbytes += CLIB_CACHE_LINE_BYTES - 1;
   nbytes &= ~(CLIB_CACHE_LINE_BYTES - 1);
 
-  rv = h->alloc_arena_next;
-  h->alloc_arena_next += nbytes;
+  rv = alloc_arena_next (h);
+  alloc_arena_next (h) += nbytes;
 
-  if (rv >= (h->alloc_arena + h->alloc_arena_size))
+  if (rv >= (alloc_arena (h) + alloc_arena_size (h)))
     os_out_of_memory ();
 
   return (void *) rv;
@@ -52,9 +52,9 @@
    */
   ASSERT (memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));
 
-  h->alloc_arena = (uword) clib_mem_vm_alloc (memory_size);
-  h->alloc_arena_next = h->alloc_arena;
-  h->alloc_arena_size = memory_size;
+  alloc_arena (h) = (uword) clib_mem_vm_alloc (memory_size);
+  alloc_arena_next (h) = alloc_arena (h);
+  alloc_arena_size (h) = memory_size;
 
   bucket_size = nbuckets * sizeof (h->buckets[0]);
   h->buckets = BV (alloc_aligned) (h, bucket_size);
@@ -65,6 +65,129 @@
   h->fmt_fn = NULL;
 }
 
+#if BIHASH_32_64_SVM
+#if !defined (MFD_ALLOW_SEALING)
+#define MFD_ALLOW_SEALING 0x0002U
+#endif
+
+void BV (clib_bihash_master_init_svm)
+  (BVT (clib_bihash) * h, char *name, u32 nbuckets,
+   u64 base_address, u64 memory_size)
+{
+  uword bucket_size;
+  u8 *mmap_addr;
+  vec_header_t *freelist_vh;
+  int fd;
+
+  ASSERT (base_address);
+  ASSERT (base_address + memory_size < (1ULL << 32));
+
+  /* Set up for memfd sharing */
+  if ((fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
+    {
+      clib_unix_warning ("memfd_create");
+      return;
+    }
+
+  if (ftruncate (fd, memory_size) < 0)
+    {
+      clib_unix_warning ("ftruncate");
+      return;
+    }
+
+  /* Not mission-critical, complain and continue */
+  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
+    clib_unix_warning ("fcntl (F_ADD_SEALS)");
+
+  mmap_addr = mmap (u64_to_pointer (base_address), memory_size,
+		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd,
+		    0 /* offset */ );
+
+  if (mmap_addr == MAP_FAILED)
+    {
+      clib_unix_warning ("mmap failed");
+      ASSERT (0);
+    }
+
+  h->sh = (void *) mmap_addr;
+  h->memfd = fd;
+  nbuckets = 1 << (max_log2 (nbuckets));
+
+  h->name = (u8 *) name;
+  h->sh->nbuckets = h->nbuckets = nbuckets;
+  h->log2_nbuckets = max_log2 (nbuckets);
+
+  alloc_arena (h) = (u64) (uword) mmap_addr;
+  alloc_arena_next (h) = alloc_arena (h) + CLIB_CACHE_LINE_BYTES;
+  alloc_arena_size (h) = memory_size;
+
+  bucket_size = nbuckets * sizeof (h->buckets[0]);
+  h->buckets = BV (alloc_aligned) (h, bucket_size);
+  h->sh->buckets_as_u64 = (u64) (uword) h->buckets;
+
+  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
+  h->alloc_lock[0] = 0;
+
+  h->sh->alloc_lock_as_u64 = (u64) (uword) (h->alloc_lock);
+  freelist_vh = BV (alloc_aligned) (h, sizeof (vec_header_t) +
+				    BIHASH_FREELIST_LENGTH * sizeof (u64));
+  freelist_vh->len = BIHASH_FREELIST_LENGTH;
+  freelist_vh->dlmalloc_header_offset = 0xDEADBEEF;
+  h->sh->freelists_as_u64 = (u64) (uword) freelist_vh->vector_data;
+  h->freelists = (void *) (uword) (h->sh->freelists_as_u64);
+
+  h->fmt_fn = NULL;
+}
+
+void BV (clib_bihash_slave_init_svm)
+  (BVT (clib_bihash) * h, char *name, int fd)
+{
+  u8 *mmap_addr;
+  u64 base_address, memory_size;
+  BVT (clib_bihash_shared_header) * sh;
+
+  /* Trial mapping, to place the segment */
+  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
+  if (mmap_addr == MAP_FAILED)
+    {
+      clib_unix_warning ("trial mmap failed");
+      ASSERT (0);
+    }
+
+  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;
+
+  base_address = sh->alloc_arena;
+  memory_size = sh->alloc_arena_size;
+
+  munmap (mmap_addr, 4096);
+
+  /* Actual mapping, at the required address */
+  mmap_addr = mmap (u64_to_pointer (base_address), memory_size,
+		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd,
+		    0 /* offset */ );
+
+  if (mmap_addr == MAP_FAILED)
+    {
+      clib_unix_warning ("mmap failed");
+      ASSERT (0);
+    }
+
+  (void) close (fd);
+
+  h->sh = (void *) mmap_addr;
+  h->memfd = -1;
+
+  h->name = (u8 *) name;
+  h->buckets = u64_to_pointer (h->sh->buckets_as_u64);
+  h->nbuckets = h->sh->nbuckets;
+  h->log2_nbuckets = max_log2 (h->nbuckets);
+
+  h->alloc_lock = u64_to_pointer (h->sh->alloc_lock_as_u64);
+  h->freelists = u64_to_pointer (h->sh->freelists_as_u64);
+  h->fmt_fn = NULL;
+}
+#endif /* BIHASH_32_64_SVM */
+
 void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
 					 format_function_t * fmt_fn)
 {
@@ -74,8 +197,13 @@
 void BV (clib_bihash_free) (BVT (clib_bihash) * h)
 {
   vec_free (h->working_copies);
+#if BIHASH_32_64_SVM == 0
   vec_free (h->freelists);
-  clib_mem_vm_free ((void *) (h->alloc_arena), h->alloc_arena_size);
+#else
+  if (h->memfd > 0)
+    (void) close (h->memfd);
+#endif
+  clib_mem_vm_free ((void *) (uword) (alloc_arena (h)), alloc_arena_size (h));
   memset (h, 0, sizeof (*h));
 }
 
@@ -86,14 +214,19 @@
   BVT (clib_bihash_value) * rv = 0;
 
   ASSERT (h->alloc_lock[0]);
+
+#if BIHASH_32_64_SVM
+  ASSERT (log2_pages < vec_len (h->freelists));
+#endif
+
   if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
     {
       vec_validate_init_empty (h->freelists, log2_pages, 0);
       rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
       goto initialize;
     }
-  rv = h->freelists[log2_pages];
-  h->freelists[log2_pages] = rv->next_free;
+  rv = (void *) (uword) h->freelists[log2_pages];
+  h->freelists[log2_pages] = rv->next_free_as_u64;
 
 initialize:
   ASSERT (rv);
@@ -117,8 +250,8 @@
   if (CLIB_DEBUG > 0)
     memset (v, 0xFE, sizeof (*v) * (1 << log2_pages));
 
-  v->next_free = h->freelists[log2_pages];
-  h->freelists[log2_pages] = v;
+  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
+  h->freelists[log2_pages] = (u64) (uword) v;
 }
 
 static inline void
@@ -361,6 +494,7 @@
 	      CLIB_MEMORY_BARRIER ();	/* Make sure the value has settled */
 	      clib_memcpy (&(v->kvp[i]), &add_v->key, sizeof (add_v->key));
 	      b->refcnt++;
+	      ASSERT (b->refcnt > 0);
 	      BV (clib_bihash_unlock_bucket) (b);
 	      return (0);
 	    }
@@ -490,6 +624,7 @@
   tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
   tmp_b.linear_search = mark_bucket_linear;
   tmp_b.refcnt = h->saved_bucket.refcnt + 1;
+  ASSERT (tmp_b.refcnt > 0);
   tmp_b.lock = 0;
   CLIB_MEMORY_BARRIER ();
   b->as_u64 = tmp_b.as_u64;
@@ -587,7 +722,7 @@
 
       if (verbose)
 	{
-	  s = format (s, "[%d]: heap offset %d, len %d, linear %d\n", i,
+	  s = format (s, "[%d]: heap offset %lld, len %d, linear %d\n", i,
 		      b->offset, (1 << b->log2_pages), b->linear_search);
 	}
 
@@ -633,24 +768,25 @@
       u32 nfree = 0;
       BVT (clib_bihash_value) * free_elt;
 
-      free_elt = h->freelists[i];
+      free_elt = (void *) (uword) h->freelists[i];
       while (free_elt)
 	{
 	  nfree++;
-	  free_elt = free_elt->next_free;
+	  free_elt = (void *) (uword) free_elt->next_free_as_u64;
 	}
 
-      s = format (s, "       [len %d] %u free elts\n", 1 << i, nfree);
+      if (nfree || verbose)
+	s = format (s, "       [len %d] %u free elts\n", 1 << i, nfree);
     }
 
   s = format (s, "    %lld linear search buckets\n", linear_buckets);
-  used_bytes = h->alloc_arena_next - h->alloc_arena;
+  used_bytes = alloc_arena_next (h) - alloc_arena (h);
   s = format (s,
 	      "    arena: base %llx, next %llx\n"
 	      "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
-	      h->alloc_arena, h->alloc_arena_next,
+	      alloc_arena (h), alloc_arena_next (h),
 	      used_bytes, used_bytes >> 20,
-	      h->alloc_arena_size, h->alloc_arena_size >> 20);
+	      alloc_arena_size (h), alloc_arena_size (h) >> 20);
   return s;
 }
 
diff --git a/src/vppinfra/bihash_template.h b/src/vppinfra/bihash_template.h
index 4ff7e1b..ef7e830 100644
--- a/src/vppinfra/bihash_template.h
+++ b/src/vppinfra/bihash_template.h
@@ -33,6 +33,17 @@
 #error BIHASH_TYPE not defined
 #endif
 
+#ifdef BIHASH_32_64_SVM
+#undef HAVE_MEMFD_CREATE
+#include <vppinfra/linux/syscall.h>
+#include <fcntl.h>
+#define F_LINUX_SPECIFIC_BASE 1024
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_SEAL_SHRINK (2)
+/* Max page size 2**16 due to refcount width  */
+#define BIHASH_FREELIST_LENGTH 17
+#endif
+
 #define _bv(a,b) a##b
 #define __bv(a,b) _bv(a,b)
 #define BV(a) __bv(a,BIHASH_TYPE)
@@ -41,12 +52,22 @@
 #define __bvt(a,b) _bvt(a,b)
 #define BVT(a) __bvt(a,BIHASH_TYPE)
 
+#if _LP64 == 0
+#define OVERFLOW_ASSERT(x) ASSERT(((x) & 0xFFFFFFFF00000000ULL) == 0)
+#define u64_to_pointer(x) (void *)(u32)((x))
+#define pointer_to_u64(x) (u64)(u32)((x))
+#else
+#define OVERFLOW_ASSERT(x)
+#define u64_to_pointer(x) (void *)((x))
+#define pointer_to_u64(x) (u64)((x))
+#endif
+
 typedef struct BV (clib_bihash_value)
 {
   union
   {
     BVT (clib_bihash_kv) kvp[BIHASH_KVP_PER_PAGE];
-    struct BV (clib_bihash_value) * next_free;
+    u64 next_free_as_u64;
   };
 } BVT (clib_bihash_value);
 
@@ -62,7 +83,7 @@
       u64 lock:1;
       u64 linear_search:1;
       u64 log2_pages:8;
-      i64 refcnt:16;
+      u64 refcnt:16;
     };
     u64 as_u64;
   };
@@ -70,9 +91,31 @@
 
 STATIC_ASSERT_SIZEOF (BVT (clib_bihash_bucket), sizeof (u64));
 
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+  /*
+   * Backing store allocation. Since bihash manages its own
+   * freelists, we simple dole out memory at alloc_arena_next.
+   */
+  u64 alloc_arena_next;	/* Next VA to allocate, definitely NOT a constant */
+  u64 alloc_arena_size;	/* Size of the arena */
+  u64 alloc_arena;	/* Base VA of the arena */
+  /* Two SVM pointers stored as 8-byte integers */
+  u64 alloc_lock_as_u64;
+  u64 buckets_as_u64;
+  /* freelist list-head arrays/vectors */
+  u64 freelists_as_u64;
+  u32 nbuckets;	/* Number of buckets */
+  /* Set when header valid */
+  volatile u32 ready;
+  u64 pad;
+}) BVT (clib_bihash_shared_header);
+/* *INDENT-ON* */
+
+STATIC_ASSERT_SIZEOF (BVT (clib_bihash_shared_header), 8 * sizeof (u64));
+
 typedef struct
 {
-  BVT (clib_bihash_value) * values;
   BVT (clib_bihash_bucket) * buckets;
   volatile u32 *alloc_lock;
 
@@ -84,15 +127,14 @@
   u32 log2_nbuckets;
   u8 *name;
 
-    BVT (clib_bihash_value) ** freelists;
+  u64 *freelists;
 
-  /*
-   * Backing store allocation. Since bihash manages its own
-   * freelists, we simple dole out memory at alloc_arena_next.
-   */
-  uword alloc_arena;
-  uword alloc_arena_next;
-  uword alloc_arena_size;
+#if BIHASH_32_64_SVM
+    BVT (clib_bihash_shared_header) * sh;
+  int memfd;
+#else
+    BVT (clib_bihash_shared_header) sh;
+#endif
 
   /**
     * A custom format function to print the Key and Value of bihash_key instead of default hexdump
@@ -101,6 +143,26 @@
 
 } BVT (clib_bihash);
 
+#if BIHASH_32_64_SVM
+#undef alloc_arena_next
+#undef alloc_arena_size
+#undef alloc_arena
+#undef CLIB_BIHASH_READY_MAGIC
+#define alloc_arena_next(h) (((h)->sh)->alloc_arena_next)
+#define alloc_arena_size(h) (((h)->sh)->alloc_arena_size)
+#define alloc_arena(h) (((h)->sh)->alloc_arena)
+#define CLIB_BIHASH_READY_MAGIC 0xFEEDFACE
+#else
+#undef alloc_arena_next
+#undef alloc_arena_size
+#undef alloc_arena
+#undef CLIB_BIHASH_READY_MAGIC
+#define alloc_arena_next(h) ((h)->sh.alloc_arena_next)
+#define alloc_arena_size(h) ((h)->sh.alloc_arena_size)
+#define alloc_arena(h) ((h)->sh.alloc_arena)
+#define CLIB_BIHASH_READY_MAGIC 0
+#endif
+
 static inline void BV (clib_bihash_alloc_lock) (BVT (clib_bihash) * h)
 {
   while (__atomic_test_and_set (h->alloc_lock, __ATOMIC_ACQUIRE))
@@ -139,7 +201,7 @@
 static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
 						uword offset)
 {
-  u8 *hp = (u8 *) h->alloc_arena;
+  u8 *hp = (u8 *) (uword) alloc_arena (h);
   u8 *vp = hp + offset;
 
   return (void *) vp;
@@ -157,7 +219,7 @@
 {
   u8 *hp, *vp;
 
-  hp = (u8 *) h->alloc_arena;
+  hp = (u8 *) (uword) alloc_arena (h);
   vp = (u8 *) v;
 
   return vp - hp;
@@ -166,6 +228,14 @@
 void BV (clib_bihash_init)
   (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size);
 
+#if BIHASH_32_64_SVM
+void BV (clib_bihash_master_init_svm)
+  (BVT (clib_bihash) * h, char *name, u32 nbuckets,
+   u64 base_address, u64 memory_size);
+void BV (clib_bihash_slave_init_svm)
+  (BVT (clib_bihash) * h, char *name, int fd);
+#endif
+
 void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
 					 format_function_t * fmt_fn);
 
diff --git a/src/vppinfra/crc32.h b/src/vppinfra/crc32.h
index 7361129..5f4d94f 100644
--- a/src/vppinfra/crc32.h
+++ b/src/vppinfra/crc32.h
@@ -36,8 +36,10 @@
 #else
   /* workaround weird GCC bug when using _mm_crc32_u32
      which happens with -O2 optimization */
+#if !defined (__i686__)
  asm volatile ("":::"memory");
 #endif
+#endif
 
   for (; len >= 4; len -= 4, s += 4)
     v = _mm_crc32_u32 (v, *((u32 *) s));
diff --git a/src/vppinfra/linux/mem.c b/src/vppinfra/linux/mem.c
index e4740ad..bceb3b2 100644
--- a/src/vppinfra/linux/mem.c
+++ b/src/vppinfra/linux/mem.c
@@ -46,7 +46,7 @@
 #define F_SEAL_WRITE    0x0008	/* prevent writes */
 #endif
 
-uword
+u64
 clib_mem_vm_get_page_size (int fd)
 {
   struct stat st = { 0 };
@@ -87,7 +87,7 @@
   int log2_page_size;
   int n_pages;
   int old_mpol = -1;
-  u64 old_mask[16] = { 0 };
+  long unsigned int old_mask[16] = { 0 };
 
   /* save old numa mem policy if needed */
   if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
@@ -210,7 +210,7 @@
   if (old_mpol != -1)
     {
       int rv;
-      u64 mask[16] = { 0 };
+      long unsigned int mask[16] = { 0 };
       mask[0] = 1 << a->numa_node;
       rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
       if (rv == -1 && a->numa_node != 0 &&
diff --git a/src/vppinfra/test_bihash_template.c b/src/vppinfra/test_bihash_template.c
index e52f274..c1a4469 100644
--- a/src/vppinfra/test_bihash_template.c
+++ b/src/vppinfra/test_bihash_template.c
@@ -72,7 +72,13 @@
 
   h = &tm->hash;
 
+#if BIHASH_32_64_SVM
+  BV (clib_bihash_master_init_svm) (h, "test", user_buckets,
+				    0x30000000 /* base_addr */ ,
+				    user_memory_size);
+#else
   BV (clib_bihash_init) (h, "test", user_buckets, user_memory_size);
+#endif
 
   before = clib_time_now (&tm->clib_time);
 
@@ -116,7 +122,13 @@
 
   h = &tm->hash;
 
+#if BIHASH_32_64_SVM
+  BV (clib_bihash_master_init_svm) (h, "test", tm->nbuckets,
+				    0x30000000 /* base_addr */ ,
+				    tm->hash_memory_size);
+#else
   BV (clib_bihash_init) (h, "test", tm->nbuckets, tm->hash_memory_size);
+#endif
 
   fformat (stdout, "Add %d items to %d buckets\n", tm->nitems, tm->nbuckets);
 
@@ -195,7 +207,13 @@
 
   h = &tm->hash;
 
+#if BIHASH_32_64_SVM
+  BV (clib_bihash_master_init_svm) (h, "test", tm->nbuckets,
+				    0x30000000 /* base_addr */ ,
+				    tm->hash_memory_size);
+#else
   BV (clib_bihash_init) (h, "test", tm->nbuckets, tm->hash_memory_size);
+#endif
 
   tm->thread_barrier = 1;
 
@@ -243,7 +261,13 @@
 
   h = &tm->hash;
 
+#if BIHASH_32_64_SVM
+  BV (clib_bihash_master_init_svm) (h, "test", tm->nbuckets,
+				    0x30000000 /* base_addr */ ,
+				    tm->hash_memory_size);
+#else
   BV (clib_bihash_init) (h, "test", tm->nbuckets, tm->hash_memory_size);
+#endif
 
   for (acycle = 0; acycle < tm->ncycles; acycle++)
     {
@@ -420,6 +444,9 @@
   fformat (stdout, "End of run, should be empty...\n");
 
   fformat (stdout, "%U", BV (format_bihash), h, 0 /* very verbose */ );
+
+  BV (clib_bihash_free) (h);
+
   return 0;
 }
 
@@ -431,7 +458,7 @@
   int which = 0;
 
   tm->report_every_n = 1;
-  tm->hash_memory_size = 4095ULL << 20;
+  tm->hash_memory_size = 1ULL << 30;
 
   while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
     {
diff --git a/src/vppinfra/vector.h b/src/vppinfra/vector.h
index 2157ab7..2b84cc2 100644
--- a/src/vppinfra/vector.h
+++ b/src/vppinfra/vector.h
@@ -42,11 +42,12 @@
 
 /* Vector types. */
 
-#if defined (__MMX__) || defined (__IWMMXT__) || defined (__aarch64__)
+#if defined (__MMX__) || defined (__IWMMXT__) || defined (__aarch64__) \
+  || defined (__i686__)
 #define CLIB_HAVE_VEC64
 #endif
 
-#if defined (__aarch64__) && defined(__ARM_NEON)
+#if defined (__aarch64__) && defined(__ARM_NEON) || defined (__i686__)
 #define CLIB_HAVE_VEC128
 #endif