vlib: introduce vlib_get_main_by_index(), vlib_get_n_threads()

Also add vlib_get_first_main(), and convert direct uses of the global
vlib_mains vector and &vlib_global_main throughout plugins, vlib, vnet
and vlibmemory to the new accessor functions, so callers no longer
depend on the underlying storage of the per-thread vlib_main_t objects.

Type: improvement
Change-Id: If3da7d4338470912f37ff1794620418d928fb77f
Signed-off-by: Damjan Marion <damarion@cisco.com>
diff --git a/src/plugins/acl/dataplane_node.c b/src/plugins/acl/dataplane_node.c
index 77aaa0e..25fa4e9 100644
--- a/src/plugins/acl/dataplane_node.c
+++ b/src/plugins/acl/dataplane_node.c
@@ -418,7 +418,7 @@
 		    {
 		      trace_bitmap |= 0x80000000;
 		    }
-		  ASSERT (f_sess_id.thread_index < vec_len (vlib_mains));
+		  ASSERT (f_sess_id.thread_index < vlib_get_n_threads ());
 		  b[0]->error = no_error_existing_session;
 		  acl_check_needed = 0;
 		  pkts_exist_session += 1;
diff --git a/src/plugins/acl/sess_mgmt_node.c b/src/plugins/acl/sess_mgmt_node.c
index ea96927..3fc4f5e 100644
--- a/src/plugins/acl/sess_mgmt_node.c
+++ b/src/plugins/acl/sess_mgmt_node.c
@@ -361,8 +361,9 @@
   if (!pw->interrupt_is_pending)
     {
       pw->interrupt_is_pending = 1;
-      vlib_node_set_interrupt_pending (vlib_mains[thread_index],
-				       acl_fa_worker_session_cleaner_process_node.index);
+      vlib_node_set_interrupt_pending (
+	vlib_get_main_by_index (thread_index),
+	acl_fa_worker_session_cleaner_process_node.index);
       elog_acl_maybe_trace_X1 (am,
 			       "send_one_worker_interrupt: send interrupt to worker %u",
 			       "i4", ((u32) thread_index));
@@ -560,7 +561,7 @@
 {
   int i;
   /* Can't use vec_len(am->per_worker_data) since the threads might not have come up yet; */
-  int n_threads = vec_len (vlib_mains);
+  int n_threads = vlib_get_n_threads ();
   for (i = 0; i < n_threads; i++)
     {
       send_one_worker_interrupt (vm, am, i);
@@ -600,7 +601,7 @@
        *
        * Also, while we are at it, calculate the earliest we need to wake up.
        */
-      for (ti = 0; ti < vec_len (vlib_mains); ti++)
+      for (ti = 0; ti < vlib_get_n_threads (); ti++)
 	{
 	  if (ti >= vec_len (am->per_worker_data))
 	    {
@@ -746,7 +747,7 @@
 
 	    /* now wait till they all complete */
 	    acl_log_info ("CLEANER mains len: %u per-worker len: %d",
-			  vec_len (vlib_mains),
+			  vlib_get_n_threads (),
 			  vec_len (am->per_worker_data));
 	    vec_foreach (pw0, am->per_worker_data)
 	    {
diff --git a/src/plugins/acl/session_inlines.h b/src/plugins/acl/session_inlines.h
index 7a8b7ce..edc8a70 100644
--- a/src/plugins/acl/session_inlines.h
+++ b/src/plugins/acl/session_inlines.h
@@ -466,7 +466,7 @@
 {
   u64 curr_sess_count;
   curr_sess_count = am->fa_session_total_adds - am->fa_session_total_dels;
-  return (curr_sess_count + vec_len (vlib_mains) <
+  return (curr_sess_count + vlib_get_n_threads () <
 	  am->fa_conn_table_max_entries);
 }
 
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
index 054e45d..81f386c 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
@@ -1735,7 +1735,7 @@
   for (i = skip_master; i < tm->n_vlib_mains; i++)
     {
       ptd = cmt->per_thread_data + i;
-      numa = vlib_mains[i]->numa_node;
+      numa = vlib_get_main_by_index (i)->numa_node;
 
       ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
 					 CRYPTODEV_MAX_AAD_SIZE,
diff --git a/src/plugins/flowprobe/flowprobe.c b/src/plugins/flowprobe/flowprobe.c
index 37abcfc..2a32fbf 100644
--- a/src/plugins/flowprobe/flowprobe.c
+++ b/src/plugins/flowprobe/flowprobe.c
@@ -1041,13 +1041,13 @@
   vec_reset_length (event_data);
 
   int i;
-  if (vec_len (vlib_mains) == 0)
+  if (vlib_get_n_threads () == 0)
     vec_add1 (worker_vms, vm);
   else
     {
-      for (i = 0; i < vec_len (vlib_mains); i++)
+      for (i = 0; i < vlib_get_n_threads (); i++)
 	{
-	  worker_vm = vlib_mains[i];
+	  worker_vm = vlib_get_main_by_index (i);
 	  if (worker_vm)
 	    vec_add1 (worker_vms, worker_vm);
 	}
diff --git a/src/plugins/hs_apps/echo_client.c b/src/plugins/hs_apps/echo_client.c
index 9da7bc5..4680ae2 100644
--- a/src/plugins/hs_apps/echo_client.c
+++ b/src/plugins/hs_apps/echo_client.c
@@ -940,7 +940,7 @@
 
   /* Turn on the builtin client input nodes */
   for (i = 0; i < thread_main->n_vlib_mains; i++)
-    vlib_node_set_state (vlib_mains[i], echo_clients_node.index,
+    vlib_node_set_state (vlib_get_main_by_index (i), echo_clients_node.index,
 			 VLIB_NODE_STATE_POLLING);
 
   if (preallocate_sessions)
diff --git a/src/plugins/hs_apps/http_server.c b/src/plugins/hs_apps/http_server.c
index e1674d5..72e3f32 100644
--- a/src/plugins/hs_apps/http_server.c
+++ b/src/plugins/hs_apps/http_server.c
@@ -209,7 +209,7 @@
 http_process_free (http_server_args * args)
 {
   vlib_node_runtime_t *rt;
-  vlib_main_t *vm = &vlib_global_main;
+  vlib_main_t *vm = vlib_get_first_main ();
   http_server_main_t *hsm = &http_server_main;
   vlib_node_t *n;
   u32 node_index;
diff --git a/src/plugins/mdata/mdata.c b/src/plugins/mdata/mdata.c
index f74564e..6962296 100644
--- a/src/plugins/mdata/mdata.c
+++ b/src/plugins/mdata/mdata.c
@@ -147,20 +147,21 @@
   if (vec_len (mmp->before_per_thread) == 0)
     {
       mdata_none.node_index = ~0;
-      vec_validate (mmp->before_per_thread, vec_len (vlib_mains) - 1);
+      vec_validate (mmp->before_per_thread, vlib_get_n_threads () - 1);
     }
 
   /* Reset the per-node accumulator, see vec_validate_init_empty above */
   vec_reset_length (mmp->modifies);
 
-  for (i = 0; i < vec_len (vlib_mains); i++)
+  for (i = 0; i < vlib_get_n_threads (); i++)
     {
-      if (vlib_mains[i] == 0)
+      vlib_main_t *ovm = vlib_get_main_by_index (i);
+      if (ovm == 0)
 	continue;
 
-      clib_callback_data_enable_disable
-	(&vlib_mains[i]->vlib_node_runtime_perf_callbacks,
-	 mdata_trace_callback, enable_disable);
+      clib_callback_data_enable_disable (
+	&ovm->vlib_node_runtime_perf_callbacks, mdata_trace_callback,
+	enable_disable);
     }
 
   return rv;
diff --git a/src/plugins/memif/device.c b/src/plugins/memif/device.c
index 9debf2b..101c3f7 100644
--- a/src/plugins/memif/device.c
+++ b/src/plugins/memif/device.c
@@ -413,7 +413,7 @@
 						   thread_index);
   u8 tx_queues = vec_len (mif->tx_queues);
 
-  if (tx_queues < vec_len (vlib_mains))
+  if (tx_queues < vlib_get_n_threads ())
     {
       ASSERT (tx_queues > 0);
       mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
diff --git a/src/plugins/memif/memif.c b/src/plugins/memif/memif.c
index 80cd902..31a34cc 100644
--- a/src/plugins/memif/memif.c
+++ b/src/plugins/memif/memif.c
@@ -306,8 +306,8 @@
 					      mq->int_clib_file_index);
 	}
       ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
-      mq->buffer_pool_index =
-	vlib_buffer_pool_get_default_for_numa (vm, vlib_mains[ti]->numa_node);
+      mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
+	vm, vlib_get_main_by_index (ti)->numa_node);
       rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
       vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
 
diff --git a/src/plugins/memif/private.h b/src/plugins/memif/private.h
index a53aca5..22b2ab8 100644
--- a/src/plugins/memif/private.h
+++ b/src/plugins/memif/private.h
@@ -24,7 +24,7 @@
 #define MEMIF_DEFAULT_TX_QUEUES 1
 #define MEMIF_DEFAULT_BUFFER_SIZE 2048
 
-#define MEMIF_MAX_M2S_RING		(vec_len (vlib_mains))
+#define MEMIF_MAX_M2S_RING		(vlib_get_n_threads ())
 #define MEMIF_MAX_S2M_RING		256
 #define MEMIF_MAX_REGION		256
 #define MEMIF_MAX_LOG2_RING_SIZE	14
diff --git a/src/plugins/nat/lib/ipfix_logging.c b/src/plugins/nat/lib/ipfix_logging.c
index d4ede6b..38a2cc9 100644
--- a/src/plugins/nat/lib/ipfix_logging.c
+++ b/src/plugins/nat/lib/ipfix_logging.c
@@ -1288,12 +1288,12 @@
 
   if (PREDICT_FALSE (!silm->worker_vms))
     {
-      for (i = 1; i < vec_len (vlib_mains); i++)
-        {
-          worker_vm = vlib_mains[i];
-          if (worker_vm)
-            vec_add1 (silm->worker_vms, worker_vm);
-        }
+      for (i = 1; i < vlib_get_n_threads (); i++)
+	{
+	  worker_vm = vlib_get_main_by_index (i);
+	  if (worker_vm)
+	    vec_add1 (silm->worker_vms, worker_vm);
+	}
     }
 
   /* Trigger flush for each worker thread */
diff --git a/src/plugins/nat/nat44-ei/nat44_ei_ha.c b/src/plugins/nat/nat44-ei/nat44_ei_ha.c
index ca99efc..3d634dc 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei_ha.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei_ha.c
@@ -711,7 +711,7 @@
   nat_ha_message_header_t *h;
   ip4_header_t *ip;
   udp_header_t *udp;
-  vlib_main_t *vm = vlib_mains[thread_index];
+  vlib_main_t *vm = vlib_get_main_by_index (thread_index);
 
   ip = vlib_buffer_get_current (b);
   udp = ip4_next_header (ip);
@@ -737,7 +737,7 @@
   nat44_ei_main_t *nm = &nat44_ei_main;
   nat_ha_main_t *ha = &nat_ha_main;
   nat_ha_per_thread_data_t *td = &ha->per_thread_data[thread_index];
-  vlib_main_t *vm = vlib_mains[thread_index];
+  vlib_main_t *vm = vlib_get_main_by_index (thread_index);
   vlib_buffer_t *b = 0;
   vlib_frame_t *f;
   u32 bi = ~0, offset;
@@ -967,12 +967,12 @@
       vlib_process_wait_for_event_or_clock (vm, 1.0);
       event_type = vlib_process_get_events (vm, &event_data);
       vec_reset_length (event_data);
-      for (ti = 0; ti < vec_len (vlib_mains); ti++)
+      for (ti = 0; ti < vlib_get_n_threads (); ti++)
 	{
 	  if (ti >= vec_len (ha->per_thread_data))
 	    continue;
 
-	  vlib_node_set_interrupt_pending (vlib_mains[ti],
+	  vlib_node_set_interrupt_pending (vlib_get_main_by_index (ti),
 					   nat_ha_worker_node.index);
 	}
     }
diff --git a/src/plugins/nat/nat64/nat64.c b/src/plugins/nat/nat64/nat64.c
index 6e5ceec..1c1cdfb 100644
--- a/src/plugins/nat/nat64/nat64.c
+++ b/src/plugins/nat/nat64/nat64.c
@@ -998,7 +998,7 @@
       static_bib->is_add = is_add;
       static_bib->thread_index = thread_index;
       static_bib->done = 0;
-      worker_vm = vlib_mains[thread_index];
+      worker_vm = vlib_get_main_by_index (thread_index);
       if (worker_vm)
 	vlib_node_set_interrupt_pending (worker_vm,
 					 nat64_static_bib_worker_node.index);
@@ -1452,13 +1452,13 @@
   int i;
   uword event_type, *event_data = 0;
 
-  if (vec_len (vlib_mains) == 0)
+  if (vlib_get_n_threads () == 0)
     vec_add1 (worker_vms, vm);
   else
     {
-      for (i = 0; i < vec_len (vlib_mains); i++)
+      for (i = 0; i < vlib_get_n_threads (); i++)
 	{
-	  worker_vm = vlib_mains[i];
+	  worker_vm = vlib_get_main_by_index (i);
 	  if (worker_vm)
 	    vec_add1 (worker_vms, worker_vm);
 	}
diff --git a/src/plugins/nat/pnat/tests/pnat_test.c b/src/plugins/nat/pnat/tests/pnat_test.c
index ab55e7e..fd91ab2 100644
--- a/src/plugins/nat/pnat/tests/pnat_test.c
+++ b/src/plugins/nat/pnat/tests/pnat_test.c
@@ -347,7 +347,7 @@
 static void test_table(test_t *t, int no_tests) {
     // walk through table of tests
     int i;
-    vlib_main_t *vm = &vlib_global_main;
+    vlib_main_t *vm = vlib_get_first_main();
 
     /* Generate packet data */
     for (i = 0; i < no_tests; i++) {
@@ -376,7 +376,7 @@
 void test_performance(void) {
     pnat_main_t *pm = &pnat_main;
     int i;
-    vlib_main_t *vm = &vlib_global_main;
+    vlib_main_t *vm = vlib_get_first_main();
 
     for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
         add_translation(&rules[i]);
@@ -505,7 +505,7 @@
 
 void test_checksum(void) {
     int i;
-    vlib_main_t *vm = &vlib_global_main;
+    vlib_main_t *vm = vlib_get_first_main();
     pnat_main_t *pm = &pnat_main;
 
     test_t test = {
@@ -559,7 +559,7 @@
 
     clib_mem_init(0, 3ULL << 30);
 
-    vlib_main_t *vm = &vlib_global_main;
+    vlib_main_t *vm = vlib_get_first_main();
 
     buffers_vector = buffer_init(buffers_vector, 256);
 
diff --git a/src/plugins/nsim/nsim.c b/src/plugins/nsim/nsim.c
index 00bf84e..2720673 100644
--- a/src/plugins/nsim/nsim.c
+++ b/src/plugins/nsim/nsim.c
@@ -215,7 +215,7 @@
   i = (!nsm->poll_main_thread && num_workers) ? 1 : 0;
   for (; i < num_workers + 1; i++)
     {
-      vlib_main_t *this_vm = vlib_mains[i];
+      vlib_main_t *this_vm = vlib_get_main_by_index (i);
 
       vlib_node_set_state (this_vm, nsim_input_node.index,
 			   VLIB_NODE_STATE_POLLING);
diff --git a/src/plugins/perfmon/perfmon.c b/src/plugins/perfmon/perfmon.c
index 7a69d45..316e7a5 100644
--- a/src/plugins/perfmon/perfmon.c
+++ b/src/plugins/perfmon/perfmon.c
@@ -48,8 +48,8 @@
   uword page_size = clib_mem_get_page_size ();
 
   if (pm->is_running)
-    for (int i = 0; i < vec_len (vlib_mains); i++)
-      vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+    for (int i = 0; i < vlib_get_n_threads (); i++)
+      vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);
 
   for (int i = 0; i < vec_len (pm->fds_to_close); i++)
     close (pm->fds_to_close[i]);
@@ -104,7 +104,7 @@
     {
       vec_add2 (pm->default_instance_type, it, 1);
       it->name = is_node ? "Thread/Node" : "Thread";
-      for (int i = 0; i < vec_len (vlib_mains); i++)
+      for (int i = 0; i < vlib_get_n_threads (); i++)
 	{
 	  vlib_worker_thread_t *w = vlib_worker_threads + i;
 	  perfmon_instance_t *in;
@@ -114,7 +114,7 @@
 	  in->name = (char *) format (0, "%s (%u)%c", w->name, i, 0);
 	}
       if (is_node)
-	vec_validate (pm->thread_runtimes, vec_len (vlib_mains) - 1);
+	vec_validate (pm->thread_runtimes, vlib_get_n_threads () - 1);
     }
   else
     {
@@ -234,8 +234,8 @@
     }
   if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
     {
-      for (int i = 0; i < vec_len (vlib_mains); i++)
-	vlib_node_set_dispatch_wrapper (vlib_mains[i],
+      for (int i = 0; i < vlib_get_n_threads (); i++)
+	vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i),
 					perfmon_dispatch_wrapper);
     }
   pm->sample_time = vlib_time_now (vm);
@@ -254,8 +254,8 @@
 
   if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
     {
-      for (int i = 0; i < vec_len (vlib_mains); i++)
-	vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+      for (int i = 0; i < vlib_get_n_threads (); i++)
+	vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);
     }
 
   for (int i = 0; i < n_groups; i++)
diff --git a/src/plugins/tracedump/tracedump.c b/src/plugins/tracedump/tracedump.c
index ab86ef9..7c83ba2 100644
--- a/src/plugins/tracedump/tracedump.c
+++ b/src/plugins/tracedump/tracedump.c
@@ -224,10 +224,10 @@
   if (vec_len (client_trace_cache) == 0
       && (iterator_thread_id != ~0 || iterator_position != ~0))
     {
-      vlib_worker_thread_barrier_sync (&vlib_global_main);
+      vlib_worker_thread_barrier_sync (vlib_get_first_main ());
 
       /* Make a slot for each worker thread */
-      vec_validate (client_trace_cache, vec_len (vlib_mains) - 1);
+      vec_validate (client_trace_cache, vlib_get_n_threads () - 1);
       i = 0;
 
       /* *INDENT-OFF* */
@@ -250,7 +250,7 @@
         i++;
       }));
       /* *INDENT-ON* */
-      vlib_worker_thread_barrier_release (&vlib_global_main);
+      vlib_worker_thread_barrier_release (vlib_get_first_main ());
     }
 
   /* Save the cache, one way or the other */
@@ -268,7 +268,8 @@
 
 	  vec_reset_length (s);
 
-	  s = format (s, "%U", format_vlib_trace, &vlib_global_main, th[0]);
+	  s =
+	    format (s, "%U", format_vlib_trace, vlib_get_first_main (), th[0]);
 
 	  dmp = vl_msg_api_alloc (sizeof (*dmp) + vec_len (s));
 	  dmp->_vl_msg_id =
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index e1c871c..f8cfb03 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -569,7 +569,7 @@
   bp->data_size = data_size;
   bp->numa_node = m->numa_node;
 
-  vec_validate_aligned (bp->threads, vec_len (vlib_mains) - 1,
+  vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
 			CLIB_CACHE_LINE_BYTES);
 
   alloc_size = vlib_buffer_alloc_size (bm->ext_hdr_size, data_size);
@@ -673,7 +673,7 @@
   vec_foreach (bp, bm->buffer_pools)
     {
       clib_spinlock_lock (&bp->lock);
-      vec_validate_aligned (bp->threads, vec_len (vlib_mains) - 1,
+      vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
 			    CLIB_CACHE_LINE_BYTES);
       clib_spinlock_unlock (&bp->lock);
     }
diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index 0fa18d6..17eb54e4 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -545,7 +545,8 @@
 	{
 	  hf->n_vectors = VLIB_FRAME_SIZE;
 	  vlib_put_frame_queue_elt (hf);
-	  vlib_mains[current_thread_index]->check_frame_queues = 1;
+	  vlib_get_main_by_index (current_thread_index)->check_frame_queues =
+	    1;
 	  current_thread_index = ~0;
 	  ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
 	  hf = 0;
@@ -574,7 +575,7 @@
 	  if (1 || hf->n_vectors == hf->last_n_vectors)
 	    {
 	      vlib_put_frame_queue_elt (hf);
-	      vlib_mains[i]->check_frame_queues = 1;
+	      vlib_get_main_by_index (i)->check_frame_queues = 1;
 	      ptd->handoff_queue_elt_by_thread_index[i] = 0;
 	    }
 	  else
diff --git a/src/vlib/global_funcs.h b/src/vlib/global_funcs.h
index 9dd01fb..b1d636f 100644
--- a/src/vlib/global_funcs.h
+++ b/src/vlib/global_funcs.h
@@ -19,13 +19,31 @@
 #ifndef included_vlib_global_funcs_h_
 #define included_vlib_global_funcs_h_
 
+always_inline u32
+vlib_get_n_threads ()
+{
+  return vec_len (vlib_mains);
+}
+
+always_inline vlib_main_t *
+vlib_get_main_by_index (u32 thread_index)
+{
+  vlib_main_t *vm;
+  vm = vlib_mains[thread_index];
+  ASSERT (vm);
+  return vm;
+}
+
 always_inline vlib_main_t *
 vlib_get_main (void)
 {
-  vlib_main_t *vm;
-  vm = vlib_mains[vlib_get_thread_index ()];
-  ASSERT (vm);
-  return vm;
+  return vlib_get_main_by_index (vlib_get_thread_index ());
+}
+
+always_inline vlib_main_t *
+vlib_get_first_main (void)
+{
+  return vlib_get_main_by_index (0);
 }
 
 always_inline vlib_thread_main_t *
diff --git a/src/vlib/node.c b/src/vlib/node.c
index 618baec..3c96d9d 100644
--- a/src/vlib/node.c
+++ b/src/vlib/node.c
@@ -613,9 +613,9 @@
 
   if (vec_len (stat_vms) == 0)
     {
-      for (i = 0; i < vec_len (vlib_mains); i++)
+      for (i = 0; i < vlib_get_n_threads (); i++)
 	{
-	  stat_vm = vlib_mains[i];
+	  stat_vm = vlib_get_main_by_index (i);
 	  if (stat_vm)
 	    vec_add1 (stat_vms, stat_vm);
 	}
@@ -837,10 +837,11 @@
 	{
 	  n->function = fnr->function;
 
-	  for (int i = 0; i < vec_len (vlib_mains); i++)
+	  for (int i = 0; i < vlib_get_n_threads (); i++)
 	    {
 	      vlib_node_runtime_t *nrt;
-	      nrt = vlib_node_get_runtime (vlib_mains[i], n->index);
+	      nrt =
+		vlib_node_get_runtime (vlib_get_main_by_index (i), n->index);
 	      nrt->function = fnr->function;
 	    }
 	  return 0;
diff --git a/src/vlib/node_cli.c b/src/vlib/node_cli.c
index 39fca6e..8cf5794 100644
--- a/src/vlib/node_cli.c
+++ b/src/vlib/node_cli.c
@@ -210,14 +210,14 @@
       /* Updating the stats for multithreaded use cases.
        * We need to dup the nodes to sum the stats from all threads.*/
       nodes = vec_dup (nm->nodes);
-      for (i = 1; i < vec_len (vlib_mains); i++)
+      for (i = 1; i < vlib_get_n_threads (); i++)
 	{
 	  vlib_node_main_t *nm_clone;
 	  vlib_main_t *vm_clone;
 	  vlib_node_runtime_t *rt;
 	  vlib_node_t *n;
 
-	  vm_clone = vlib_mains[i];
+	  vm_clone = vlib_get_main_by_index (i);
 	  nm_clone = &vm_clone->node_main;
 
 	  for (j = 0; j < vec_len (nm_clone->nodes); j++)
@@ -516,9 +516,9 @@
 	  || unformat (input, "su"))
 	summary = 1;
 
-      for (i = 0; i < vec_len (vlib_mains); i++)
+      for (i = 0; i < vlib_get_n_threads (); i++)
 	{
-	  stat_vm = vlib_mains[i];
+	  stat_vm = vlib_get_main_by_index (i);
 	  if (stat_vm)
 	    vec_add1 (stat_vms, stat_vm);
 	}
@@ -592,7 +592,7 @@
 		}
 	    }
 
-	  if (vec_len (vlib_mains) > 1)
+	  if (vlib_get_n_threads () > 1)
 	    {
 	      vlib_worker_thread_t *w = vlib_worker_threads + j;
 	      if (j > 0)
@@ -665,9 +665,9 @@
   vlib_main_t **stat_vms = 0, *stat_vm;
   vlib_node_runtime_t *r;
 
-  for (i = 0; i < vec_len (vlib_mains); i++)
+  for (i = 0; i < vlib_get_n_threads (); i++)
     {
-      stat_vm = vlib_mains[i];
+      stat_vm = vlib_get_main_by_index (i);
       if (stat_vm)
 	vec_add1 (stat_vms, stat_vm);
     }
@@ -848,10 +848,10 @@
 
   s = format (s, "\n%8s %=12s %=12s %=12s %=12s %=12s\n", "Thread", "Calls",
 	      "Clocks", "Vectors", "Max Clock", "Max Vectors");
-  for (i = 0; i < vec_len (vlib_mains); i++)
+  for (i = 0; i < vlib_get_n_threads (); i++)
     {
-      n = vlib_get_node (vlib_mains[i], node_index);
-      vlib_node_sync_stats (vlib_mains[i], n);
+      n = vlib_get_node (vlib_get_main_by_index (i), node_index);
+      vlib_node_sync_stats (vlib_get_main_by_index (i), n);
 
       cl = n->stats_total.clocks - n->stats_last_clear.clocks;
       ca = n->stats_total.calls - n->stats_last_clear.calls;
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index dd7de73..939f910 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -574,7 +574,8 @@
 
   __os_thread_index = w - vlib_worker_threads;
 
-  vlib_process_start_switch_stack (vlib_mains[__os_thread_index], 0);
+  vlib_process_start_switch_stack (vlib_get_main_by_index (__os_thread_index),
+				   0);
   rv = (void *) clib_calljmp
     ((uword (*)(uword)) w->thread_function,
      (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
@@ -1001,7 +1002,7 @@
 
   ASSERT (vlib_get_thread_index () == 0);
 
-  vm = vlib_mains[0];
+  vm = vlib_get_first_main ();
   nm = &vm->node_main;
 
   ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
@@ -1017,11 +1018,11 @@
       vlib_node_sync_stats (vm, n);
     }
 
-  for (i = 1; i < vec_len (vlib_mains); i++)
+  for (i = 1; i < vlib_get_n_threads (); i++)
     {
       vlib_node_t *n;
 
-      vm_clone = vlib_mains[i];
+      vm_clone = vlib_get_main_by_index (i);
       nm_clone = &vm_clone->node_main;
 
       for (j = 0; j < vec_len (nm_clone->nodes); j++)
@@ -1049,7 +1050,7 @@
 
   int j;
 
-  vm = vlib_mains[0];
+  vm = vlib_get_first_main ();
   nm = &vm->node_main;
   vm_clone = vlib_get_main ();
   nm_clone = &vm_clone->node_main;
@@ -1425,7 +1426,7 @@
 {
   f64 deadline;
   f64 now = vlib_time_now (vm);
-  u32 count = vec_len (vlib_mains) - 1;
+  u32 count = vlib_get_n_threads () - 1;
 
   /* No worker threads? */
   if (count == 0)
@@ -1451,7 +1452,7 @@
 u8
 vlib_worker_thread_barrier_held (void)
 {
-  if (vec_len (vlib_mains) < 2)
+  if (vlib_get_n_threads () < 2)
     return (1);
 
   return (*vlib_worker_threads->wait_at_barrier == 1);
@@ -1469,13 +1470,13 @@
   u32 count;
   int i;
 
-  if (vec_len (vlib_mains) < 2)
+  if (vlib_get_n_threads () < 2)
     return;
 
   ASSERT (vlib_get_thread_index () == 0);
 
   vlib_worker_threads[0].barrier_caller = func_name;
-  count = vec_len (vlib_mains) - 1;
+  count = vlib_get_n_threads () - 1;
 
   /* Record entry relative to last close */
   now = vlib_time_now (vm);
@@ -1497,10 +1498,12 @@
    * the barrier hold-down timer.
    */
   max_vector_rate = 0.0;
-  for (i = 1; i < vec_len (vlib_mains); i++)
-    max_vector_rate =
-      clib_max (max_vector_rate,
-		(f64) vlib_last_vectors_per_main_loop (vlib_mains[i]));
+  for (i = 1; i < vlib_get_n_threads (); i++)
+    {
+      vlib_main_t *ovm = vlib_get_main_by_index (i);
+      max_vector_rate = clib_max (max_vector_rate,
+				  (f64) vlib_last_vectors_per_main_loop (ovm));
+    }
 
   vlib_worker_threads[0].barrier_sync_count++;
 
@@ -1562,7 +1565,7 @@
   f64 t_update_main = 0.0;
   int refork_needed = 0;
 
-  if (vec_len (vlib_mains) < 2)
+  if (vlib_get_n_threads () < 2)
     return;
 
   ASSERT (vlib_get_thread_index () == 0);
@@ -1594,7 +1597,7 @@
       /* Do per thread rebuilds in parallel */
       refork_needed = 1;
       clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
-			     (vec_len (vlib_mains) - 1));
+			     (vlib_get_n_threads () - 1));
       now = vlib_time_now (vm);
       t_update_main = now - vm->barrier_epoch;
     }
@@ -1668,7 +1671,7 @@
 {
   ASSERT (vlib_get_thread_index () == 0);
 
-  if (vec_len (vlib_mains) < 2)
+  if (vlib_get_n_threads () < 2)
     return;
 
   if (vlib_worker_thread_barrier_held ())
@@ -1677,7 +1680,7 @@
   u32 *counts = 0;
   u32 ii;
 
-  vec_validate (counts, vec_len (vlib_mains) - 1);
+  vec_validate (counts, vlib_get_n_threads () - 1);
 
   /* record the current loop counts */
   vec_foreach_index (ii, vlib_mains)
@@ -1973,24 +1976,24 @@
 		   verbose, format_clib_timebase_time,
 		   clib_timebase_now (tb));
 
-  if (vec_len (vlib_mains) == 1)
+  if (vlib_get_n_threads () == 1)
     return 0;
 
   vlib_cli_output (vm, "Time last barrier release %.9f",
 		   vm->time_last_barrier_release);
 
-  for (i = 1; i < vec_len (vlib_mains); i++)
+  for (i = 1; i < vlib_get_n_threads (); i++)
     {
-      if (vlib_mains[i] == 0)
+      vlib_main_t *ovm = vlib_get_main_by_index (i);
+      if (ovm == 0)
 	continue;
 
-      vlib_cli_output (vm, "%d: %U", i, format_clib_time,
-		       &vlib_mains[i]->clib_time, verbose);
+      vlib_cli_output (vm, "%d: %U", i, format_clib_time, &ovm->clib_time,
+		       verbose);
 
-      vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
-		       vlib_mains[i]->time_offset,
-		       vm->time_last_barrier_release -
-		       vlib_mains[i]->time_last_barrier_release);
+      vlib_cli_output (
+	vm, "Thread %d offset %.9f error %.9f", i, ovm->time_offset,
+	vm->time_last_barrier_release - ovm->time_last_barrier_release);
     }
   return 0;
 }
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 6894cdf..eb31edc 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -521,7 +521,7 @@
   vlib_main_t *vm;
   vlib_thread_main_t *tm = &vlib_thread_main;
   ASSERT (worker_index < tm->n_vlib_mains - 1);
-  vm = vlib_mains[worker_index + 1];
+  vm = vlib_get_main_by_index (worker_index + 1);
   ASSERT (vm);
   return vm;
 }
diff --git a/src/vlib/unix/cli.c b/src/vlib/unix/cli.c
index 96e22a2..6c98867 100644
--- a/src/vlib/unix/cli.c
+++ b/src/vlib/unix/cli.c
@@ -2886,9 +2886,9 @@
        * the same new name.
        * Then, throw away the old shared name-vector.
        */
-      for (i = 0; i < vec_len (vlib_mains); i++)
+      for (i = 0; i < vlib_get_n_threads (); i++)
 	{
-	  this_vlib_main = vlib_mains[i];
+	  this_vlib_main = vlib_get_main_by_index (i);
 	  if (this_vlib_main == 0)
 	    continue;
 	  n = vlib_get_node (this_vlib_main,
diff --git a/src/vlib/unix/input.c b/src/vlib/unix/input.c
index 6398148..9c7c54f 100644
--- a/src/vlib/unix/input.c
+++ b/src/vlib/unix/input.c
@@ -198,9 +198,9 @@
 	  }
 	node->input_main_loops_per_call = 0;
       }
-    else if (is_main == 0 && vector_rate < 2
-	     && (vlib_global_main.time_last_barrier_release + 0.5 < now)
-	     && nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
+    else if (is_main == 0 && vector_rate < 2 &&
+	     (vlib_get_first_main ()->time_last_barrier_release + 0.5 < now) &&
+	     nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
       {
 	timeout = 10e-3;
 	timeout_ms = max_timeout_ms;
diff --git a/src/vlib/unix/main.c b/src/vlib/unix/main.c
index e86d421..f73f9ca 100644
--- a/src/vlib/unix/main.c
+++ b/src/vlib/unix/main.c
@@ -692,7 +692,7 @@
 int
 vlib_unix_main (int argc, char *argv[])
 {
-  vlib_main_t *vm = &vlib_global_main;	/* one and only time for this! */
+  vlib_main_t *vm = vlib_get_first_main (); /* one and only time for this! */
   unformat_input_t input;
   clib_error_t *e;
   int i;
diff --git a/src/vlibmemory/vlib_api.c b/src/vlibmemory/vlib_api.c
index 51378a5..f033070 100644
--- a/src/vlibmemory/vlib_api.c
+++ b/src/vlibmemory/vlib_api.c
@@ -561,7 +561,7 @@
 void
 vl_api_send_pending_rpc_requests (vlib_main_t * vm)
 {
-  vlib_main_t *vm_global = &vlib_global_main;
+  vlib_main_t *vm_global = vlib_get_first_main ();
 
   ASSERT (vm != vm_global);
 
@@ -576,7 +576,7 @@
 				    u8 force_rpc)
 {
   vl_api_rpc_call_t *mp;
-  vlib_main_t *vm_global = &vlib_global_main;
+  vlib_main_t *vm_global = vlib_get_first_main ();
   vlib_main_t *vm = vlib_get_main ();
 
   /* Main thread and not a forced RPC: call the function directly */
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index d9635dd..a6098a1 100644
--- a/src/vnet/crypto/cli.c
+++ b/src/vnet/crypto/cli.c
@@ -331,8 +331,8 @@
 
   for (i = skip_master; i < tm->n_vlib_mains; i++)
     {
-      vlib_node_state_t state =
-	vlib_node_get_state (vlib_mains[i], cm->crypto_node_index);
+      vlib_node_state_t state = vlib_node_get_state (
+	vlib_get_main_by_index (i), cm->crypto_node_index);
       if (state == VLIB_NODE_STATE_POLLING)
 	vlib_cli_output (vm, "threadId: %-6d POLLING", i);
       if (state == VLIB_NODE_STATE_INTERRUPT)
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index 9d85047..2ce1b89 100644
--- a/src/vnet/crypto/crypto.c
+++ b/src/vnet/crypto/crypto.c
@@ -477,9 +477,9 @@
   if (state_change)
     for (i = skip_master; i < tm->n_vlib_mains; i++)
       {
-	if (state !=
-	    vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
-	  vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+	vlib_main_t *ovm = vlib_get_main_by_index (i);
+	if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
+	  vlib_node_set_state (ovm, cm->crypto_node_index, state);
       }
   return 0;
 }
@@ -587,9 +587,9 @@
   if (state_change)
     for (i = skip_master; i < tm->n_vlib_mains; i++)
       {
-	if (state !=
-	    vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
-	  vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+	vlib_main_t *ovm = vlib_get_main_by_index (i);
+	if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
+	  vlib_node_set_state (ovm, cm->crypto_node_index, state);
       }
 
   if (is_enable)
@@ -623,8 +623,9 @@
 
   for (i = skip_master; i < tm->n_vlib_mains; i++)
     {
-      if (state != vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
-	vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+      vlib_main_t *ovm = vlib_get_main_by_index (i);
+      if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
+	vlib_node_set_state (ovm, cm->crypto_node_index, state);
     }
 }
 
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 1277318..670be69 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -583,7 +583,7 @@
       if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
 	{
 	  for (; i < tm->n_vlib_mains; i++)
-	    vlib_node_set_interrupt_pending (vlib_mains[i],
+	    vlib_node_set_interrupt_pending (vlib_get_main_by_index (i),
 					     cm->crypto_node_index);
 	}
     }
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index e1186f4..7f34ec1 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -138,8 +138,9 @@
       if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
 	  && n_elts > 0)
 	{
-	  vlib_node_set_interrupt_pending (vlib_mains[enqueue_thread_idx],
-					   cm->crypto_node_index);
+	  vlib_node_set_interrupt_pending (
+	    vlib_get_main_by_index (enqueue_thread_idx),
+	    cm->crypto_node_index);
 	}
 
       n_elts = 0;
diff --git a/src/vnet/interface/runtime.c b/src/vnet/interface/runtime.c
index c763127..20ac51f 100644
--- a/src/vnet/interface/runtime.c
+++ b/src/vnet/interface/runtime.c
@@ -63,7 +63,7 @@
   vnet_hw_if_rx_queue_t *rxq;
   vnet_hw_if_rxq_poll_vector_t *pv, **d = 0;
   vlib_node_state_t *per_thread_node_state = 0;
-  u32 n_threads = vec_len (vlib_mains);
+  u32 n_threads = vlib_get_n_threads ();
   u16 *per_thread_node_adaptive = 0;
   int something_changed = 0;
   clib_bitmap_t *pending_int = 0;
@@ -133,10 +133,11 @@
    * unnecesary barrier */
   for (int i = 0; i < n_threads; i++)
     {
+      vlib_main_t *ovm = vlib_get_main_by_index (i);
       vlib_node_state_t old_state;
       vec_sort_with_function (d[i], poll_data_sort);
 
-      old_state = vlib_node_get_state (vlib_mains[i], node_index);
+      old_state = vlib_node_get_state (ovm, node_index);
       if (per_thread_node_state[i] != old_state)
 	{
 	  something_changed = 1;
@@ -150,7 +151,7 @@
       if (something_changed == 0)
 	{
 	  vnet_hw_if_rx_node_runtime_t *rt;
-	  rt = vlib_node_get_runtime_data (vlib_mains[i], node_index);
+	  rt = vlib_node_get_runtime_data (ovm, node_index);
 	  if (vec_len (rt->rxq_poll_vector) != vec_len (d[i]))
 	    something_changed = 1;
 	  else if (memcmp (d[i], rt->rxq_poll_vector,
@@ -178,7 +179,7 @@
 
       for (int i = 0; i < n_threads; i++)
 	{
-	  vlib_main_t *vm = vlib_mains[i];
+	  vlib_main_t *vm = vlib_get_main_by_index (i);
 	  vnet_hw_if_rx_node_runtime_t *rt;
 	  rt = vlib_node_get_runtime_data (vm, node_index);
 	  pv = rt->rxq_poll_vector;
diff --git a/src/vnet/interface/rx_queue_funcs.h b/src/vnet/interface/rx_queue_funcs.h
index c36263e..e1e6c33 100644
--- a/src/vnet/interface/rx_queue_funcs.h
+++ b/src/vnet/interface/rx_queue_funcs.h
@@ -53,7 +53,7 @@
 {
   vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
-  vlib_main_t *vm = vlib_mains[rxq->thread_index];
+  vlib_main_t *vm = vlib_get_main_by_index (rxq->thread_index);
   vnet_hw_if_rx_node_runtime_t *rt;
   if (PREDICT_FALSE (rxq->mode != VNET_HW_IF_RX_MODE_INTERRUPT &&
 		     rxq->mode != VNET_HW_IF_RX_MODE_ADAPTIVE))
diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c
index 906a73e..eb8a716 100644
--- a/src/vnet/session/application.c
+++ b/src/vnet/session/application.c
@@ -1553,7 +1553,7 @@
   application_t *app;
   int i, n_threads;
 
-  n_threads = vec_len (vlib_mains);
+  n_threads = vlib_get_n_threads ();
 
   for (i = 0; i < n_threads; i++)
     {
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index b7c1702..469ec0e 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -1689,7 +1689,7 @@
       wrk->ctrl_head = clib_llist_make_head (wrk->event_elts, evt_list);
       wrk->new_head = clib_llist_make_head (wrk->event_elts, evt_list);
       wrk->old_head = clib_llist_make_head (wrk->event_elts, evt_list);
-      wrk->vm = vlib_mains[i];
+      wrk->vm = vlib_get_main_by_index (i);
       wrk->last_vlib_time = vlib_time_now (vm);
       wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;
       vec_validate (wrk->session_to_enqueue, smm->last_transport_proto_type);
diff --git a/src/vnet/session/session_debug.c b/src/vnet/session/session_debug.c
index c042e9e..349d1ec 100644
--- a/src/vnet/session/session_debug.c
+++ b/src/vnet/session/session_debug.c
@@ -120,7 +120,7 @@
 void
 dump_thread_0_event_queue (void)
 {
-  vlib_main_t *vm = &vlib_global_main;
+  vlib_main_t *vm = vlib_get_first_main ();
   u32 my_thread_index = vm->thread_index;
   session_event_t _e, *e = &_e;
   svm_msg_q_shared_queue_t *sq;
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index 7ceb9ea..ccf93cb 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -1534,7 +1534,7 @@
 static clib_error_t *
 session_queue_exit (vlib_main_t * vm)
 {
-  if (vec_len (vlib_mains) < 2)
+  if (vlib_get_n_threads () < 2)
     return 0;
 
   /*
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index 161b8ef..72161ec 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -1330,7 +1330,7 @@
       vec_reset_length (wrk->pending_deq_acked);
       vec_reset_length (wrk->pending_disconnects);
       vec_reset_length (wrk->pending_resets);
-      wrk->vm = vlib_mains[thread];
+      wrk->vm = vlib_get_main_by_index (thread);
       wrk->max_timers_per_loop = 10;
 
       if (thread > 0)
diff --git a/src/vnet/unix/gdb_funcs.c b/src/vnet/unix/gdb_funcs.c
index fc61c54..ce8db3a 100644
--- a/src/vnet/unix/gdb_funcs.c
+++ b/src/vnet/unix/gdb_funcs.c
@@ -272,8 +272,8 @@
             goto done;
           }
 
-        s = format (s, "Packet %d\n%U\n\n", i + 1,
-                         format_vlib_trace, vlib_mains[0], traces[i]);
+	s = format (s, "Packet %d\n%U\n\n", i + 1, format_vlib_trace,
+		    vlib_get_first_main (), traces[i]);
       }
 
   done:
diff --git a/src/vpp/api/gmon.c b/src/vpp/api/gmon.c
index b324445..ff561ef 100644
--- a/src/vpp/api/gmon.c
+++ b/src/vpp/api/gmon.c
@@ -121,7 +121,7 @@
   /* Initial wait for the world to settle down */
   vlib_process_suspend (vm, 5.0);
 
-  for (i = 0; i < vec_len (vlib_mains); i++)
-    vec_add1 (gm->my_vlib_mains, vlib_mains[i]);
+  for (i = 0; i < vlib_get_n_threads (); i++)
+    vec_add1 (gm->my_vlib_mains, vlib_get_main_by_index (i));
 
   while (1)
diff --git a/src/vpp/stats/stat_segment.c b/src/vpp/stats/stat_segment.c
index 6a666f5..722edb0 100644
--- a/src/vpp/stats/stat_segment.c
+++ b/src/vpp/stats/stat_segment.c
@@ -623,12 +623,12 @@
    */
   vector_rate = 0.0;
 
-  for (i = 0; i < vec_len (vlib_mains); i++)
+  for (i = 0; i < vlib_get_n_threads (); i++)
     {
 
       f64 this_vector_rate;
 
-      this_vlib_main = vlib_mains[i];
+      this_vlib_main = vlib_get_main_by_index (i);
 
       this_vector_rate = vlib_internal_node_vector_rate (this_vlib_main);
       vlib_clear_internal_node_vector_rate (this_vlib_main);