/*
 * perfmon_periodic.c - skeleton plug-in periodic function
 *
 * Copyright (c) <current-year> <your-organization>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vppinfra/error.h>
#include <perfmon/perfmon.h>
#include <asm/unistd.h>
#include <sys/ioctl.h>

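/*
 * Overview: the perfmon-periodic-process node cycles through the
 * configured perf events two at a time. enable_current_events()
 * opens and enables counters on each selected thread,
 * read_current_perf_counters() snapshots them around every node
 * dispatch, and handle_timeout() scrapes the totals and moves on
 * to the next event pair.
 */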
/* "not in glibc" */
static long
perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu,
                 int group_fd, unsigned long flags)
{
  int ret;

  ret = syscall (__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
  return ret;
}

static void
read_current_perf_counters (vlib_node_runtime_perf_callback_data_t * data,
                            vlib_node_runtime_perf_callback_args_t * args)
{
  int i;
  perfmon_main_t *pm = &perfmon_main;
  perfmon_thread_t *pt = data->u[0].v;
  u64 c[2] = { 0, 0 };
  u64 *cc;

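  /*
   * BEFORE snapshots land in the per-thread accumulator pt->c;
   * AFTER snapshots land in the local array c, so c[i] - pt->c[i]
   * is the cost of the node dispatch that just completed.
   */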
  if (PREDICT_FALSE (args->call_type == VLIB_NODE_RUNTIME_PERF_RESET))
    return;

  if (args->call_type == VLIB_NODE_RUNTIME_PERF_BEFORE)
    cc = pt->c;
  else
    cc = c;

  for (i = 0; i < pm->n_active; i++)
    {
      if (pt->rdpmc_indices[i] != ~0)
        cc[i] = clib_rdpmc ((int) pt->rdpmc_indices[i]);
      else
        {
          u64 sw_value;
          int read_result;
          if ((read_result = read (pt->pm_fds[i], &sw_value,
                                   sizeof (sw_value))) != sizeof (sw_value))
            {
              clib_unix_warning
                ("counter read returned %d, expected %d",
                 read_result, sizeof (sw_value));
              clib_callback_data_enable_disable
                (&args->vm->vlib_node_runtime_perf_callbacks,
                 read_current_perf_counters, 0 /* disable */ );
              return;
            }
          cc[i] = sw_value;
        }
    }

  if (args->call_type == VLIB_NODE_RUNTIME_PERF_AFTER)
    {
      u32 node_index = args->node->node_index;
      vec_validate (pt->counters, node_index);
      pt->counters[node_index].ticks[0] += c[0] - pt->c[0];
      pt->counters[node_index].ticks[1] += c[1] - pt->c[1];
      pt->counters[node_index].vectors += args->packets;
    }
}

static void
clear_counters (perfmon_main_t * pm)
{
  int j;
  vlib_main_t *vm = pm->vlib_main;
  vlib_main_t *stat_vm;
  perfmon_thread_t *pt;
  u32 len;

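  /* Hold the worker barrier so the counters can't move mid-clear */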
  vlib_worker_thread_barrier_sync (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
        continue;

      pt = pm->threads[j];
      len = vec_len (pt->counters);
      if (!len)
        continue;

      clib_memset (pt->counters, 0, len * sizeof (pt->counters[0]));
    }
  vlib_worker_thread_barrier_release (vm);
}

static void
enable_current_events (perfmon_main_t * pm)
{
  struct perf_event_attr pe;
  int fd;
  struct perf_event_mmap_page *p = 0;
  perfmon_event_config_t *c;
  vlib_main_t *vm = vlib_get_main ();
  u32 my_thread_index = vm->thread_index;
  perfmon_thread_t *pt = pm->threads[my_thread_index];
  u32 index;
  int i, limit = 1;
  int cpu;
  vlib_node_runtime_perf_callback_data_t cbdata = { 0 };
  cbdata.fp = read_current_perf_counters;
  cbdata.u[0].v = pt;
  cbdata.u[1].v = vm;

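  /* Collect two events per pass when at least two remain */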
  if ((pm->current_event + 1) < vec_len (pm->single_events_to_collect))
    limit = 2;

  for (i = 0; i < limit; i++)
    {
      c = vec_elt_at_index (pm->single_events_to_collect,
                            pm->current_event + i);

      memset (&pe, 0, sizeof (struct perf_event_attr));
      pe.type = c->pe_type;
      pe.size = sizeof (struct perf_event_attr);
      pe.config = c->pe_config;
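      /*
       * Create the event disabled, and pinned so it stays on the
       * PMU; it is reset and enabled via ioctl below.
       */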
      pe.disabled = 1;
      pe.pinned = 1;
      /*
       * Note: excluding the kernel makes the
       * (software) context-switch counter read 0...
       */
      if (pe.type != PERF_TYPE_SOFTWARE)
        {
          /* Exclude kernel and hypervisor */
          pe.exclude_kernel = 1;
          pe.exclude_hv = 1;
        }

      cpu = vm->cpu_id;

      fd = perf_event_open (&pe, 0, cpu, -1, 0);
      if (fd == -1)
        {
          clib_unix_warning ("event open: type %d config %d", c->pe_type,
                             c->pe_config);
          return;
        }

      if (pe.type != PERF_TYPE_SOFTWARE)
        {
          p = mmap (0, pm->page_size, PROT_READ, MAP_SHARED, fd, 0);
          if (p == MAP_FAILED)
            {
              clib_unix_warning ("mmap");
              close (fd);
              return;
            }
          CLIB_MEM_UNPOISON (p, pm->page_size);
        }
      else
        p = 0;

      if (ioctl (fd, PERF_EVENT_IOC_RESET, 0) < 0)
        clib_unix_warning ("reset ioctl");

      if (ioctl (fd, PERF_EVENT_IOC_ENABLE, 0) < 0)
        clib_unix_warning ("enable ioctl");

      pt->perf_event_pages[i] = (void *) p;
      pt->pm_fds[i] = fd;
    }

  /*
   * Hardware events must all be opened and enabled before acquiring
   * the pmc indices, otherwise the pmc indices might be outdated.
   */
  for (i = 0; i < limit; i++)
    {
      p = (struct perf_event_mmap_page *) pt->perf_event_pages[i];

      /*
       * Software event counters - and others not capable of being
       * read via the "rdpmc" instruction - will be read
       * by system calls.
       */
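      /*
       * Per the perf_event_open(2) mmap page convention, index is
       * the rdpmc counter number + 1; zero means rdpmc is not
       * permitted for this event.
       */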
      if (p == 0 || p->cap_user_rdpmc == 0)
        index = ~0;
      else
        index = p->index - 1;

      pt->rdpmc_indices[i] = index;
    }

  pm->n_active = i;
  /* Enable the main loop counter snapshot mechanism */
  clib_callback_data_add (&vm->vlib_node_runtime_perf_callbacks, cbdata);
}

static void
disable_events (perfmon_main_t * pm)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 my_thread_index = vm->thread_index;
  perfmon_thread_t *pt = pm->threads[my_thread_index];
  int i;

  /* Stop main loop collection */
  clib_callback_data_remove (&vm->vlib_node_runtime_perf_callbacks,
                             read_current_perf_counters);

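  /* Disable, unmap, and close each active event */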
  for (i = 0; i < pm->n_active; i++)
    {
      if (pt->pm_fds[i] == 0)
        continue;

      if (ioctl (pt->pm_fds[i], PERF_EVENT_IOC_DISABLE, 0) < 0)
        clib_unix_warning ("disable ioctl");

      if (pt->perf_event_pages[i])
        {
          if (munmap (pt->perf_event_pages[i], pm->page_size) < 0)
            clib_unix_warning ("munmap");
          pt->perf_event_pages[i] = 0;
        }

      (void) close (pt->pm_fds[i]);
      pt->pm_fds[i] = 0;
    }
}

static void
worker_thread_start_event (vlib_main_t * vm)
{
  perfmon_main_t *pm = &perfmon_main;

  clib_callback_enable_disable (vm->worker_thread_main_loop_callbacks,
                                vm->worker_thread_main_loop_callback_tmp,
                                vm->worker_thread_main_loop_callback_lock,
                                worker_thread_start_event, 0 /* disable */ );
  enable_current_events (pm);
}

static void
worker_thread_stop_event (vlib_main_t * vm)
{
  perfmon_main_t *pm = &perfmon_main;
  clib_callback_enable_disable (vm->worker_thread_main_loop_callbacks,
                                vm->worker_thread_main_loop_callback_tmp,
                                vm->worker_thread_main_loop_callback_lock,
                                worker_thread_stop_event, 0 /* disable */ );
  disable_events (pm);
}

static void
start_event (perfmon_main_t * pm, f64 now, uword event_data)
{
  int i;
  int last_set;
  int all = 0;
  pm->current_event = 0;

  if (vec_len (pm->single_events_to_collect) == 0)
    {
      pm->state = PERFMON_STATE_OFF;
      return;
    }

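  /* An empty thread bitmap means "collect on all threads" */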
  last_set = clib_bitmap_last_set (pm->thread_bitmap);
  all = (last_set == ~0);

  pm->state = PERFMON_STATE_RUNNING;
  clear_counters (pm);

  /* Start collection on thread 0? */
  if (all || clib_bitmap_get (pm->thread_bitmap, 0))
    {
      /* Start collection on this thread */
      enable_current_events (pm);
    }

  /* And also on worker threads */
  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      if (vlib_mains[i] == 0)
        continue;

      if (all || clib_bitmap_get (pm->thread_bitmap, i))
        clib_callback_enable_disable
          (vlib_mains[i]->worker_thread_main_loop_callbacks,
           vlib_mains[i]->worker_thread_main_loop_callback_tmp,
           vlib_mains[i]->worker_thread_main_loop_callback_lock,
           (void *) worker_thread_start_event, 1 /* enable */ );
    }
}

void
scrape_and_clear_counters (perfmon_main_t * pm)
{
  int i, j, k;
  vlib_main_t *vm = pm->vlib_main;
  vlib_main_t *stat_vm;
  vlib_node_main_t *nm;
  perfmon_counters_t *ctr;
  perfmon_counters_t *ctrs;
  perfmon_counters_t **ctr_dups = 0;
  perfmon_thread_t *pt;
  perfmon_capture_t *c;
  perfmon_event_config_t *current_event;
  uword *p;
  u8 *counter_name;
  u32 len;

  /* Snapshot the nodes, including pm counters */
  vlib_worker_thread_barrier_sync (vm);

  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
        continue;

      pt = pm->threads[j];
      len = vec_len (pt->counters);
      ctrs = 0;
      if (len)
        {
          vec_validate (ctrs, len - 1);
          clib_memcpy (ctrs, pt->counters, len * sizeof (pt->counters[0]));
          clib_memset (pt->counters, 0, len * sizeof (pt->counters[0]));
        }
      vec_add1 (ctr_dups, ctrs);
    }

  vlib_worker_thread_barrier_release (vm);

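  /* Off the barrier: fold the copied counters into the capture pool */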
  for (j = 0; j < vec_len (vlib_mains); j++)
    {
      stat_vm = vlib_mains[j];
      if (stat_vm == 0)
        continue;

      pt = pm->threads[j];
      ctrs = ctr_dups[j];

      for (i = 0; i < vec_len (ctrs); i++)
        {
          u8 *capture_name;

          ctr = &ctrs[i];
          nm = &stat_vm->node_main;

          if (ctr->ticks[0] == 0 && ctr->ticks[1] == 0)
            continue;

          for (k = 0; k < 2; k++)
            {
              /*
               * We collect 2 counters at once, except for the
               * last counter when the user asks for an odd number of
               * counters
               */
              if ((pm->current_event + k)
                  >= vec_len (pm->single_events_to_collect))
                break;

              capture_name = format (0, "t%d-%v%c", j, nm->nodes[i]->name, 0);

              p = hash_get_mem (pm->capture_by_thread_and_node_name,
                                capture_name);

              if (p == 0)
                {
                  pool_get (pm->capture_pool, c);
                  memset (c, 0, sizeof (*c));
                  c->thread_and_node_name = capture_name;
                  hash_set_mem (pm->capture_by_thread_and_node_name,
                                capture_name, c - pm->capture_pool);
                }
              else
                {
                  c = pool_elt_at_index (pm->capture_pool, p[0]);
                  vec_free (capture_name);
                }

              /* Snapshot counters, etc. into the capture */
              current_event = pm->single_events_to_collect
                + pm->current_event + k;
              counter_name = (u8 *) current_event->name;

              vec_add1 (c->counter_names, counter_name);
              vec_add1 (c->counter_values, ctr->ticks[k]);
              vec_add1 (c->vectors_this_counter, ctr->vectors);
            }
        }
      vec_free (ctrs);
    }
  vec_free (ctr_dups);
}

static void
handle_timeout (vlib_main_t * vm, perfmon_main_t * pm, f64 now)
{
  int i;
  int last_set, all;

  last_set = clib_bitmap_last_set (pm->thread_bitmap);
  all = (last_set == ~0);

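  /* Stop collection on the main thread, if it was selected */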
  if (all || clib_bitmap_get (pm->thread_bitmap, 0))
    disable_events (pm);

  /* And also on worker threads */
  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      if (vlib_mains[i] == 0)
        continue;
      if (all || clib_bitmap_get (pm->thread_bitmap, i))
        clib_callback_enable_disable
          (vlib_mains[i]->worker_thread_main_loop_callbacks,
           vlib_mains[i]->worker_thread_main_loop_callback_tmp,
           vlib_mains[i]->worker_thread_main_loop_callback_lock,
           (void *) worker_thread_stop_event, 1 /* enable */ );
    }

  /* Make sure workers have stopped collection */
  if (i > 1)
    {
      f64 deadman = vlib_time_now (vm) + 1.0;

      for (i = 1; i < vec_len (vlib_mains); i++)
        {
          /* Has the worker actually stopped collecting data? */
          while (clib_callback_data_is_set
                 (&vlib_mains[i]->vlib_node_runtime_perf_callbacks,
                  read_current_perf_counters))
            {
              if (vlib_time_now (vm) > deadman)
                {
                  clib_warning ("Thread %d deadman timeout!", i);
                  break;
                }
              vlib_process_suspend (pm->vlib_main, 1e-3);
            }
        }
    }
  scrape_and_clear_counters (pm);
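  /* Advance to the next event pair; shut off once all have been collected */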
  pm->current_event += pm->n_active;
  if (pm->current_event >= vec_len (pm->single_events_to_collect))
    {
      pm->current_event = 0;
      pm->state = PERFMON_STATE_OFF;
      return;
    }

  if (all || clib_bitmap_get (pm->thread_bitmap, 0))
    enable_current_events (pm);

  /* And also on worker threads */
  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      if (vlib_mains[i] == 0)
        continue;
      if (all || clib_bitmap_get (pm->thread_bitmap, i))
        clib_callback_enable_disable
          (vlib_mains[i]->worker_thread_main_loop_callbacks,
           vlib_mains[i]->worker_thread_main_loop_callback_tmp,
           vlib_mains[i]->worker_thread_main_loop_callback_lock,
           (void *) worker_thread_start_event, 1 /* enable */ );
    }
}

static uword
perfmon_periodic_process (vlib_main_t * vm,
                          vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  perfmon_main_t *pm = &perfmon_main;
  f64 now;
  uword *event_data = 0;
  uword event_type;
  int i;

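  /* Wait for a start event; once running, also wake on each timeout */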
  while (1)
    {
      if (pm->state == PERFMON_STATE_RUNNING)
        vlib_process_wait_for_event_or_clock (vm, pm->timeout_interval);
      else
        vlib_process_wait_for_event (vm);

      now = vlib_time_now (vm);

      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
        {
        case PERFMON_START:
          for (i = 0; i < vec_len (event_data); i++)
            start_event (pm, now, event_data[i]);
          break;

          /* Handle timeout */
        case ~0:
          handle_timeout (vm, pm, now);
          break;

        default:
          clib_warning ("Unexpected event %d", event_type);
          break;
        }
      vec_reset_length (event_data);
    }
  return 0;			/* or not */
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (perfmon_periodic_node) =
{
  .function = perfmon_periodic_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "perfmon-periodic-process",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */