vlib: disable cpu pinning if not configured
In some environments, such as when running a lot of functional tests, it
can be useful to run more VPP instances than there are CPUs and let the
Linux scheduler decide what to do. This change disables CPU pinning
altogether in the single-threaded case, provided that no main-core is
explicitly specified in the config.
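To illustrate, a sketch of the resulting behavior for the usual
startup.conf cpu stanza (example values only, not part of this change):

  # no cpu section, single-threaded: no pinning, the Linux
  # scheduler places the main thread

  cpu { main-core 1 }   # main thread pinned to core 1, as before

  cpu { workers 2 }     # workers without main-core: rejected at
                        # startup by the new threads_init() check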
Type: improvement
Change-Id: I8c2f36fdd49c00f9adaaeb4c81aefb27c3420a9b
Signed-off-by: Benoît Ganne <bganne@cisco.com>
Signed-off-by: Mohammed Hawari <mohammed@hawari.fr>
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index 36a8080..57ba39a 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -175,7 +175,6 @@
vlib_thread_main_t *tm = &vlib_thread_main;
vlib_worker_thread_t *w;
vlib_thread_registration_t *tr;
- cpu_set_t cpuset;
u32 n_vlib_mains = 1;
u32 first_index = 1;
u32 i;
@@ -205,33 +204,26 @@
}

/* grab cpu for main thread */
- if (tm->main_lcore == ~0)
- {
- /* if main-lcore is not set, we try to use lcore 1 */
- if (clib_bitmap_get (avail_cpu, 1))
- tm->main_lcore = 1;
- else
- tm->main_lcore = clib_bitmap_first_set (avail_cpu);
- if (tm->main_lcore == (u8) ~ 0)
- return clib_error_return (0, "no available cpus to be used for the"
- " main thread");
- }
- else
+ if (tm->main_lcore != ~0)
{
if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
return clib_error_return (0, "cpu %u is not available to be used"
" for the main thread", tm->main_lcore);
+ avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
}
- avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);

/* assume that there is socket 0 only if there is no data from sysfs */
if (!tm->cpu_socket_bitmap)
tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

/* pin main thread to main_lcore */
- CPU_ZERO (&cpuset);
- CPU_SET (tm->main_lcore, &cpuset);
- pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
+ if (tm->main_lcore != ~0)
+ {
+ cpu_set_t cpuset;
+ CPU_ZERO (&cpuset);
+ CPU_SET (tm->main_lcore, &cpuset);
+ pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
+ }

/* Set up thread 0 */
vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
@@ -1652,12 +1644,17 @@
clib_error_t *
threads_init (vlib_main_t * vm)
{
+ const vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ if (tm->main_lcore == ~0 && tm->n_vlib_mains > 1)
+ return clib_error_return (0, "Configuration error, a main core must "
+ "be specified when using worker threads");
+
return 0;
}

VLIB_INIT_FUNCTION (threads_init);
-

static clib_error_t *
show_clock_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
diff --git a/src/vlib/threads_cli.c b/src/vlib/threads_cli.c
index d14e9c5..9b30466 100644
--- a/src/vlib/threads_cli.c
+++ b/src/vlib/threads_cli.c
@@ -43,6 +43,7 @@
show_threads_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ const vlib_thread_main_t *tm = vlib_get_thread_main ();
vlib_worker_thread_t *w;
int i;

@@ -64,7 +65,7 @@
line = format (line, "%-25U", format_sched_policy_and_priority, w->lwp);

int cpu_id = w->cpu_id;
- if (cpu_id > -1)
+ if (cpu_id > -1 && tm->main_lcore != ~0)
{
int core_id = w->core_id;
int numa_id = w->numa_id;
diff --git a/src/vpp/vnet/main.c b/src/vpp/vnet/main.c
index e9cef5e..f7e5382 100644
--- a/src/vpp/vnet/main.c
+++ b/src/vpp/vnet/main.c
@@ -112,7 +112,7 @@
clib_mem_page_sz_t default_log2_hugepage_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;
unformat_input_t input, sub_input;
u8 *s = 0, *v = 0;
- int main_core = 1;
+ int main_core = ~0;
cpu_set_t cpuset;
void *main_heap;
@@ -316,9 +316,12 @@
unformat_free (&input);

/* set process affinity for main thread */
- CPU_ZERO (&cpuset);
- CPU_SET (main_core, &cpuset);
- pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
+ if (main_core != ~0)
+ {
+ CPU_ZERO (&cpuset);
+ CPU_SET (main_core, &cpuset);
+ pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
+ }

/* Set up the plugin message ID allocator right now... */
vl_msg_api_set_first_available_msg_id (VL_MSG_MEMCLNT_LAST + 1);