thread: Add show threads api
Change-Id: I3124238ab4d43bcef5590bad33a4ff0b5d8b7d15
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
diff --git a/src/plugins/dpdk/device/cli.c b/src/plugins/dpdk/device/cli.c
index c3df1ab..7e168af 100644
--- a/src/plugins/dpdk/device/cli.c
+++ b/src/plugins/dpdk/device/cli.c
@@ -727,7 +727,7 @@
cpu < (dm->hqos_cpu_first_index + dm->hqos_cpu_count))
vlib_cli_output (vm, "Thread %u (%s at lcore %u):", cpu,
vlib_worker_threads[cpu].name,
- vlib_worker_threads[cpu].lcore_id);
+ vlib_worker_threads[cpu].cpu_id);
vec_foreach (dq, dm->devices_by_hqos_cpu[cpu])
{
diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c
index c64f1d0..8699dc8 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -119,7 +119,7 @@
dpdk_mempool_private_t *privp;
uword tidx = vnet_get_device_input_thread_index (dm->vnet_main,
xd->hw_if_index, j);
- unsigned lcore = vlib_worker_threads[tidx].lcore_id;
+ unsigned lcore = vlib_worker_threads[tidx].cpu_id;
u16 socket_id = rte_lcore_to_socket_id (lcore);
rv =
diff --git a/src/plugins/nat/nat_api.c b/src/plugins/nat/nat_api.c
index 4c532ed..e642edd 100644
--- a/src/plugins/nat/nat_api.c
+++ b/src/plugins/nat/nat_api.c
@@ -206,7 +206,7 @@
rmp->_vl_msg_id = ntohs (VL_API_NAT_WORKER_DETAILS + sm->msg_id_base);
rmp->context = context;
rmp->worker_index = htonl (worker_index);
- rmp->lcore_id = htonl (w->lcore_id);
+ rmp->lcore_id = htonl (w->cpu_id);
strncpy ((char *) rmp->name, (char *) w->name, ARRAY_LEN (rmp->name) - 1);
vl_api_send_msg (reg, (u8 *) rmp);
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index 99ede1e..583185e 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -1319,6 +1319,76 @@
vam->result_ready = 1;
}
+static void vl_api_show_threads_reply_t_handler
+ (vl_api_show_threads_reply_t * mp)
+{
+ vat_main_t *vam = &vat_main;
+ i32 retval = ntohl (mp->retval);
+ int i, count = 0;
+
+ if (retval >= 0)
+ count = ntohl (mp->count);
+
+ for (i = 0; i < count; i++)
+ print (vam->ofp,
+ "\n%-2d %-11s %-11s %-5d %-6d %-4d %-6d",
+ ntohl (mp->thread_data[i].id), mp->thread_data[i].name,
+ mp->thread_data[i].type, ntohl (mp->thread_data[i].pid),
+ ntohl (mp->thread_data[i].cpu_id), ntohl (mp->thread_data[i].core),
+ ntohl (mp->thread_data[i].cpu_socket));
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+}
+
+static void vl_api_show_threads_reply_t_handler_json
+  (vl_api_show_threads_reply_t * mp)
+{
+  vat_main_t *vam = &vat_main;
+  vat_json_node_t node;
+  vl_api_thread_data_t *td;
+  int i, count = ntohl (mp->count);
+
+  vat_json_init_object (&node);
+  vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
+  vat_json_object_add_uint (&node, "count", count);
+
+  for (i = 0; i < count; i++)
+    {
+      td = &mp->thread_data[i];
+      vat_json_object_add_uint (&node, "id", ntohl (td->id));
+      vat_json_object_add_string_copy (&node, "name", td->name);
+      vat_json_object_add_string_copy (&node, "type", td->type);
+      vat_json_object_add_uint (&node, "pid", ntohl (td->pid));
+      vat_json_object_add_int (&node, "cpu_id", ntohl (td->cpu_id));
+      vat_json_object_add_int (&node, "core", ntohl (td->core));	/* was td->id */
+      vat_json_object_add_int (&node, "cpu_socket", ntohl (td->cpu_socket));
+    }
+
+  vat_json_print (vam->ofp, &node);
+  vat_json_free (&node);
+
+  vam->retval = ntohl (mp->retval);
+  vam->result_ready = 1;
+}
+
+static int
+api_show_threads (vat_main_t * vam)
+{
+ vl_api_show_threads_t *mp;
+ int ret;
+
+ print (vam->ofp,
+ "\n%-2s %-11s %-11s %-5s %-6s %-4s %-6s",
+ "ID", "Name", "Type", "LWP", "cpu_id", "Core", "Socket");
+
+ M (SHOW_THREADS, mp);
+
+ S (mp);
+ W (ret);
+ return ret;
+}
+
static void
vl_api_ip4_arp_event_t_handler (vl_api_ip4_arp_event_t * mp)
{
@@ -5655,6 +5725,7 @@
_(MODIFY_VHOST_USER_IF_REPLY, modify_vhost_user_if_reply) \
_(DELETE_VHOST_USER_IF_REPLY, delete_vhost_user_if_reply) \
_(SHOW_VERSION_REPLY, show_version_reply) \
+_(SHOW_THREADS_REPLY, show_threads_reply) \
_(L2_FIB_TABLE_DETAILS, l2_fib_table_details) \
_(VXLAN_GPE_ADD_DEL_TUNNEL_REPLY, vxlan_gpe_add_del_tunnel_reply) \
_(VXLAN_GPE_TUNNEL_DETAILS, vxlan_gpe_tunnel_details) \
@@ -23742,6 +23813,7 @@
_(delete_vhost_user_if, "<intfc> | sw_if_index <nn>") \
_(sw_interface_vhost_user_dump, "") \
_(show_version, "") \
+_(show_threads, "") \
_(vxlan_gpe_add_del_tunnel, \
"local <addr> remote <addr> | group <mcast-ip-addr>\n" \
"{ <intfc> | mcast_sw_if_index <nn> } }\n" \
diff --git a/src/vlib/node_cli.c b/src/vlib/node_cli.c
index 3dbf672..00199d9 100644
--- a/src/vlib/node_cli.c
+++ b/src/vlib/node_cli.c
@@ -345,9 +345,9 @@
if (j > 0)
vlib_cli_output (vm, "---------------");
- if (w->lcore_id > -1)
+ if (w->cpu_id > -1)
vlib_cli_output (vm, "Thread %d %s (lcore %u)", j, w->name,
- w->lcore_id);
+ w->cpu_id);
else
vlib_cli_output (vm, "Thread %d %s", j, w->name);
}
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index def8927..055998a 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -17,6 +17,7 @@
#include <signal.h>
#include <math.h>
#include <vppinfra/format.h>
+#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
@@ -305,7 +306,7 @@
w = vlib_worker_threads;
w->thread_mheap = clib_mem_get_heap ();
w->thread_stack = vlib_thread_stacks[0];
- w->lcore_id = tm->main_lcore;
+ w->cpu_id = tm->main_lcore;
w->lwp = syscall (SYS_gettid);
w->thread_id = pthread_self ();
tm->n_vlib_mains = 1;
@@ -600,21 +601,42 @@
return rv;
}
+static void
+vlib_get_thread_core_socket (vlib_worker_thread_t * w, unsigned cpu_id)
+{
+ const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
+ u8 *p = 0;
+ int core_id = -1, socket_id = -1;
+
+ p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
+ clib_sysfs_read ((char *) p, "%d", &core_id);
+ vec_reset_length (p);
+ p =
+ format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path, cpu_id,
+ 0);
+ clib_sysfs_read ((char *) p, "%d", &socket_id);
+ vec_free (p);
+
+ w->core_id = core_id;
+ w->socket_id = socket_id;
+}
+
static clib_error_t *
-vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned lcore_id)
+vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
vlib_thread_main_t *tm = &vlib_thread_main;
void *(*fp_arg) (void *) = fp;
- w->lcore_id = lcore_id;
+ w->cpu_id = cpu_id;
+ vlib_get_thread_core_socket (w, cpu_id);
if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
- return tm->cb.vlib_launch_thread_cb (fp, (void *) w, lcore_id);
+ return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
else
{
pthread_t worker;
cpu_set_t cpuset;
CPU_ZERO (&cpuset);
- CPU_SET (lcore_id, &cpuset);
+ CPU_SET (cpu_id, &cpuset);
if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
return clib_error_return_unix (0, "pthread_create");
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 7de0412..71b5d0c 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -108,7 +108,9 @@
volatile u32 *node_reforks_required;
long lwp;
- int lcore_id;
+ int cpu_id;
+ int core_id;
+ int socket_id;
pthread_t thread_id;
} vlib_worker_thread_t;
@@ -270,8 +272,8 @@
typedef struct
{
clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
- unsigned lcore_id);
- clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
+ unsigned cpu_id);
+ clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 cpu);
} vlib_thread_callbacks_t;
typedef struct
diff --git a/src/vlib/threads_cli.c b/src/vlib/threads_cli.c
index bb9ddbc..a47d864 100644
--- a/src/vlib/threads_cli.c
+++ b/src/vlib/threads_cli.c
@@ -15,7 +15,6 @@
#define _GNU_SOURCE
#include <vppinfra/format.h>
-#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
@@ -64,57 +63,16 @@
line = format (line, "%-25U", format_sched_policy_and_priority, w->lwp);
- int lcore = -1;
- cpu_set_t cpuset;
- CPU_ZERO (&cpuset);
- int ret = -1;
-
- ret =
- pthread_getaffinity_np (w->thread_id, sizeof (cpu_set_t), &cpuset);
- if (!ret)
+ int cpu_id = w->cpu_id;
+ if (cpu_id > -1)
{
- int c;
- for (c = 0; c < CPU_SETSIZE; c++)
- if (CPU_ISSET (c, &cpuset))
- {
- if (lcore > -1)
- {
- lcore = -2;
- break;
- }
- lcore = c;
- }
+ int core_id = w->core_id;
+ int socket_id = w->socket_id;
+ line = format (line, "%-7u%-7u%-7u%", cpu_id, core_id, socket_id);
}
else
{
- lcore = w->lcore_id;
- }
-
- if (lcore > -1)
- {
- const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
- int socket_id = -1;
- int core_id = -1;
- u8 *p = 0;
-
- p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, lcore, 0);
- clib_sysfs_read ((char *) p, "%d", &core_id);
-
- vec_reset_length (p);
- p =
- format (p,
- "%s%u/topology/physical_package_id%c",
- sys_cpu_path, lcore, 0);
- clib_sysfs_read ((char *) p, "%d", &socket_id);
- vec_free (p);
-
- line = format (line, "%-7u%-7u%-7u%", lcore, core_id, socket_id);
- }
- else
- {
- line =
- format (line, "%-7s%-7s%-7s%", (lcore == -2) ? "M" : "n/a", "n/a",
- "n/a");
+ line = format (line, "%-7s%-7s%-7s%", "n/a", "n/a", "n/a");
}
vlib_cli_output (vm, "%v", line);
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c
index d69b84f..8686509 100644
--- a/src/vpp/api/api.c
+++ b/src/vpp/api/api.c
@@ -2,7 +2,7 @@
*------------------------------------------------------------------
* api.c - message handler registration
*
- * Copyright (c) 2010-2016 Cisco and/or its affiliates.
+ * Copyright (c) 2010-2018 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -80,6 +80,7 @@
_(GET_NODE_INDEX, get_node_index) \
_(ADD_NODE_NEXT, add_node_next) \
_(SHOW_VERSION, show_version) \
+_(SHOW_THREADS, show_threads) \
_(GET_NODE_GRAPH, get_node_graph) \
_(GET_NEXT_INDEX, get_next_index) \
@@ -255,6 +256,69 @@
}
static void
+get_thread_data (vl_api_thread_data_t * td, int index)
+{
+ vlib_worker_thread_t *w = vlib_worker_threads + index;
+ td->id = htonl (index);
+ if (w->name)
+ strncpy ((char *) td->name, (char *) w->name, ARRAY_LEN (td->name) - 1);
+ if (w->registration)
+ strncpy ((char *) td->type, (char *) w->registration->name,
+ ARRAY_LEN (td->type) - 1);
+ td->pid = htonl (w->lwp);
+ td->cpu_id = htonl (w->cpu_id);
+ td->core = htonl (w->core_id);
+ td->cpu_socket = htonl (w->socket_id);
+}
+
+static void
+vl_api_show_threads_t_handler (vl_api_show_threads_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ int rv = 0, count = 0;
+
+#if !defined(__powerpc64__)
+ vl_api_registration_t *reg;
+ vl_api_show_threads_reply_t *rmp;
+ vl_api_thread_data_t *td;
+ int i, msg_size = 0;
+ count = vec_len (vlib_worker_threads);
+ if (!count)
+ return;
+
+ msg_size = sizeof (*rmp) + sizeof (rmp->thread_data[0]) * count;
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ rmp = vl_msg_api_alloc (msg_size);
+ memset (rmp, 0, msg_size);
+ rmp->_vl_msg_id = htons (VL_API_SHOW_THREADS_REPLY);
+ rmp->context = mp->context;
+ rmp->count = htonl (count);
+ td = rmp->thread_data;
+
+ for (i = 0; i < count; i++)
+ {
+ get_thread_data (&td[i], i);
+ }
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+#else
+
+ /* unimplemented support */
+ rv = -9;
+ clib_warning ("power pc does not support show threads api");
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_SHOW_THREADS_REPLY,
+ ({
+ rmp->count = htonl(count);
+ }));
+ /* *INDENT-ON* */
+#endif
+}
+
+static void
vl_api_get_node_index_t_handler (vl_api_get_node_index_t * mp)
{
vlib_main_t *vm = vlib_get_main ();
diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c
index baa675c..aa63566 100644
--- a/src/vpp/api/custom_dump.c
+++ b/src/vpp/api/custom_dump.c
@@ -2154,6 +2154,16 @@
FINISH;
}
+static void *vl_api_show_threads_t_print
+ (vl_api_show_threads_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: show_threads ");
+
+ FINISH;
+}
+
static void *vl_api_vxlan_gpe_add_del_tunnel_t_print
(vl_api_vxlan_gpe_add_del_tunnel_t * mp, void *handle)
{
diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api
index 2eb14bc..488af17 100644
--- a/src/vpp/api/vpe.api
+++ b/src/vpp/api/vpe.api
@@ -19,7 +19,7 @@
called through a shared memory interface.
*/
-option version = "1.0.0";
+option version = "1.1.0";
/*
* Note: API placement cleanup in progress
@@ -193,6 +193,54 @@
u8 build_directory[256];
};
+
+/** \brief show_threads displays the information about vpp
+    threads running on the system, along with their process id,
+    cpu id, physical core and cpu socket.
+*/
+define show_threads
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief thread data
+ @param id - thread index
+ @param name - thread name i.e. vpp_main or vpp_wk_0
+ @param type - thread type i.e. workers or stats
+ @param pid - thread Process Id
+   @param cpu_id - cpu on which the thread is pinned.
+   "CPUs or Logical cores are the number of physical cores times
+   the number of threads that can run on each core through
+   the use of hyperthreading." (from unix.stackexchange.com)
+   @param core - physical core on which the thread is pinned.
+   @param cpu_socket - cpu socket on which the thread is running.
+*/
+typeonly define thread_data
+{
+ u32 id;
+ u8 name[64];
+ u8 type[64];
+ u32 pid;
+ u32 cpu_id;
+ u32 core;
+ u32 cpu_socket;
+};
+
+/** \brief show_threads_reply
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param count - number of threads in thread_data array
+ @param thread_data - array of thread data
+*/
+define show_threads_reply
+{
+ u32 context;
+ i32 retval;
+ u32 count;
+ vl_api_thread_data_t thread_data[count];
+};
+
define get_node_graph
{
u32 client_index;