/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE

#include <signal.h>
#include <math.h>
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif /* __FreeBSD__ */
#include <vppinfra/format.h>
#include <vppinfra/time_range.h>
#include <vppinfra/interrupt.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/unix.h>
#include <vlib/vlib.h>

#include <vlib/threads.h>

#include <vlib/stats/stats.h>

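/*
 * Debugger-callable helper: returns the length of a vector, e.g.
 * "call vl (vlib_worker_threads)" from gdb.
 */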
u32
vl (void *p)
{
  return vec_len (p);
}

vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;

/*
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
 */

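/*
 * Each trace helper below writes one event into the global elog ring.
 * The f64 timing arguments are recorded in microseconds, hence the
 * 1000000.0 scaling applied before the integer truncation.
 */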
static inline void
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-trace-%s-#%d",
    .format_args = "T4i4",
  };

  struct
  {
    u32 caller, count, t_entry, t_open, t_closed;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
}

static inline void
barrier_trace_sync_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-syncrec-%s-#%d",
    .format_args = "T4i4",
  };

  struct
  {
    u32 caller, depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
}

static inline void
barrier_trace_release_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-relrrec-#%d",
    .format_args = "i4",
  };

  struct
  {
    u32 depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;
}

static inline void
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-rel-#%d-e%d-u%d-t%d",
    .format_args = "i4i4i4i4",
  };

  struct
  {
    u32 count, t_entry, t_update_main, t_closed_total;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
}

uword
os_get_nthreads (void)
{
  return vec_len (vlib_thread_stacks);
}

void
vlib_set_thread_name (char *name)
{
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  int rv;
  pthread_t thread = pthread_self ();

  if (thread)
    {
      rv = pthread_setname_np (thread, name);
      if (rv)
        clib_warning ("pthread_setname_np returned %d", rv);
    }
}

static int
sort_registrations_by_no_clone (void *a0, void *a1)
{
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
          - ((i32) ((*tr1)->no_data_structure_clone)));
}


/* Called early in the init sequence */

clib_error_t *
vlib_thread_init (vlib_main_t * vm)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  u32 first_index = 1;
  u32 i;
  pid_t pid;
  uword *avail_cpu, *affinity_cpu;
  uword n_cpus;
  u32 stats_num_worker_threads_dir_index;

  stats_num_worker_threads_dir_index =
    vlib_stats_add_gauge ("/sys/num_worker_threads");
  ASSERT (stats_num_worker_threads_dir_index != ~0);

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap = os_get_online_cpu_core_bitmap ();
  tm->cpu_socket_bitmap = os_get_online_cpu_node_bitmap ();

  /* get bitmap of active cpu cores vpp has affinity to */
  pid = getpid ();
  tm->cpu_affinity_bitmap = os_get_cpu_affinity_bitmap (pid);

  /* if fetching affinity fails, fall back to the online cpu core bitmap */
  if (tm->cpu_affinity_bitmap == 0)
    tm->cpu_affinity_bitmap = os_get_online_cpu_core_bitmap ();

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
  affinity_cpu = clib_bitmap_dup (tm->cpu_affinity_bitmap);

  /* skip cores */
  n_cpus = clib_bitmap_count_set_bits (avail_cpu);
  if (tm->skip_cores >= n_cpus)
    return clib_error_return (0, "skip-core greater than available cpus");
  n_cpus = clib_bitmap_count_set_bits (affinity_cpu);
  if (tm->skip_cores >= n_cpus)
    return clib_error_return (0, "skip-core greater than affinity cpus");

  for (i = 0; i < tm->skip_cores; i++)
    {
      uword c;
      c = clib_bitmap_first_set (avail_cpu);
      if (c == ~0)
        return clib_error_return (0, "no available cpus to skip");

      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);

      c = clib_bitmap_first_set (affinity_cpu);
      if (c == ~0)
        return clib_error_return (0, "no available env cpus to skip");

      affinity_cpu = clib_bitmap_set (affinity_cpu, c, 0);
    }

  /* grab cpu for main thread */
  if (tm->main_lcore != ~0)
    {
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
        return clib_error_return (0, "cpu %u is not available to be used"
                                  " for the main thread", tm->main_lcore);
      avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
      affinity_cpu = clib_bitmap_set (affinity_cpu, tm->main_lcore, 0);
    }
  /* if auto enabled, grab first cpu vpp has affinity to for main thread */
  else if (tm->use_main_core_auto)
    {
      uword c = clib_bitmap_first_set (affinity_cpu);
      if (c != ~0)
        tm->main_lcore = c;

      avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
      affinity_cpu = clib_bitmap_set (affinity_cpu, tm->main_lcore, 0);
    }

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore */
  if (tm->main_lcore != ~0)
    {
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (tm->main_lcore, &cpuset);
      if (pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t),
                                  &cpuset))
        {
          return clib_error_return (0, "could not pin main thread to cpu %u",
                                    tm->main_lcore);
        }
    }

  /* Set up thread 0 */
  vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
  vec_set_len (vlib_worker_threads, 1);
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
#ifdef __FreeBSD__
  w->lwp = pthread_getthreadid_np ();
#else
  w->lwp = syscall (SYS_gettid);
#endif /* __FreeBSD__ */
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  vlib_get_thread_core_numa (w, w->cpu_id);

  if (tm->sched_policy != ~0)
    {
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
        {
          if (tm->sched_priority != ~0)
            sched_param.sched_priority = tm->sched_priority;
          sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
        }
    }

  /* assign threads to cores and set n_vlib_mains */
  tr = tm->next;

  while (tr)
    {
      vec_add1 (tm->registrations, tr);
      tr = tr->next;
    }

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      int j;
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
        continue;

      if (tr->coremask)
        {
          uword c;
          clib_bitmap_foreach (c, tr->coremask)
            {
              if (clib_bitmap_get (avail_cpu, c) == 0)
                return clib_error_return (0,
                                          "cpu %u is not available to be used"
                                          " for the '%s' thread", c, tr->name);

              avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
            }
        }
      else
        {
          /* for automatic pinning, use cpu affinity list */
          uword n_env_cpu = 0;
          n_env_cpu = clib_bitmap_count_set_bits (affinity_cpu);

          if (n_env_cpu < tr->count)
            return clib_error_return (0,
                                      "no available cpus to be used for"
                                      " the '%s' thread #%u",
                                      tr->name, n_env_cpu);

          for (j = 0; j < tr->count; j++)
            {
              /* Do not use CPU 0 by default - leave it to the host and IRQs */
              uword avail_c0 = clib_bitmap_get (affinity_cpu, 0);
              affinity_cpu = clib_bitmap_set (affinity_cpu, 0, 0);

              uword c = clib_bitmap_first_set (affinity_cpu);
              /* Use CPU 0 as a last resort */
              if (c == ~0 && avail_c0)
                {
                  c = 0;
                  avail_c0 = 0;
                }

              if (c == ~0)
                return clib_error_return (0,
                                          "no available cpus to be used for"
                                          " the '%s' thread #%u",
                                          tr->name, tr->count);

              affinity_cpu = clib_bitmap_set (affinity_cpu, 0, avail_c0);
              affinity_cpu = clib_bitmap_set (affinity_cpu, c, 0);
              tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
            }
        }
    }

  clib_bitmap_free (avail_cpu);
  clib_bitmap_free (affinity_cpu);

  tm->n_vlib_mains = n_vlib_mains;
  vlib_stats_set_gauge (stats_num_worker_threads_dir_index, n_vlib_mains - 1);

  /*
   * Allocate the remaining worker threads and thread stack vector slots;
   * from now on, calls to os_get_nthreads() will return the correct
   * answer.
   */
  vec_validate_aligned (vlib_worker_threads, first_index - 1,
                        CLIB_CACHE_LINE_BYTES);
  vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);
  return 0;
}

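/*
 * Allocate a handoff frame queue. nelts must be a power of two so that
 * ring indices can be reduced with a simple mask; anything else hits the
 * hard abort below.
 */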
vlib_frame_queue_t *
vlib_frame_queue_alloc (int nelts)
{
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  clib_memset (fq, 0, sizeof (*fq));
  fq->nelts = nelts;
  fq->vector_threshold = 2 * VLIB_FRAME_SIZE;
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (nelts & (nelts - 1))
    {
      fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
      abort ();
    }

  return (fq);
}

void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
void
vl_msg_api_handler_no_free (void *v)
{
}

/* To be called by vlib worker threads upon startup */
void
vlib_worker_thread_init (vlib_worker_thread_t * w)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  /*
   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working
   * {
   *   sigset_t s;
   *   sigfillset (&s);
   *   pthread_sigmask (SIG_SETMASK, &s, 0);
   * }
   */

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
    {
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
                        w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  if (!w->registration->use_pthreads)
    {

      /* Initial barrier sync, for both worker and i/o threads */
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

      while (*vlib_worker_threads->wait_at_barrier)
        ;

      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
    }
}

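/*
 * Common bootstrap for worker threads: record the kernel thread id, set
 * the per-thread index used by vlib_get_thread_index(), and (in debug
 * images) verify that we are actually running on the stack allocated for
 * this worker before calling the registered thread function.
 */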
void *
vlib_worker_thread_bootstrap_fn (void *arg)
{
  vlib_worker_thread_t *w = arg;

#ifdef __FreeBSD__
  w->lwp = pthread_getthreadid_np ();
#else
  w->lwp = syscall (SYS_gettid);
#endif /* __FreeBSD__ */
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  if (CLIB_DEBUG > 0)
    {
      void *frame_addr = __builtin_frame_address (0);
      if (frame_addr < (void *) w->thread_stack ||
          frame_addr > (void *) w->thread_stack + VLIB_THREAD_STACK_SIZE)
        {
          /* heap is not set yet */
          fprintf (stderr, "thread stack is not set properly\n");
          exit (1);
        }
    }

  w->thread_function (arg);

  return 0;
}

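/*
 * Resolve the physical core id and NUMA node for cpu_id and cache them
 * in the worker thread structure.
 */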
void
vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
{
  clib_bitmap_t *nbmp = 0, *cbmp = 0;
  int node, core_id = -1, numa_id = -1;

  core_id = os_get_cpu_phys_core_id (cpu_id);
  nbmp = os_get_online_cpu_node_bitmap ();

  clib_bitmap_foreach (node, nbmp)
    {
      cbmp = os_get_cpu_on_node_bitmap (node);
      if (clib_bitmap_get (cbmp, cpu_id))
        numa_id = node;
      vec_reset_length (cbmp);
    }

  vec_free (nbmp);
  vec_free (cbmp);

  w->core_id = core_id;
  w->numa_id = numa_id;
}

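/*
 * Launch one worker: set up its NUMA-bound heap if one was requested,
 * create the pthread on the preallocated vlib thread stack, and pin it
 * to cpu_id.
 */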
static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
  clib_mem_main_t *mm = &clib_mem_main;
  vlib_thread_main_t *tm = &vlib_thread_main;
  pthread_t worker;
  pthread_attr_t attr;
  cpu_set_t cpuset;
  void *(*fp_arg) (void *) = fp;
  void *numa_heap;

  w->cpu_id = cpu_id;
  vlib_get_thread_core_numa (w, cpu_id);

  /* Set up NUMA-bound heap if indicated */
  if (mm->per_numa_mheaps[w->numa_id] == 0)
    {
      /* If the user requested a NUMA heap, create it... */
      if (tm->numa_heap_size)
        {
          clib_mem_set_numa_affinity (w->numa_id, 1 /* force */ );
          numa_heap = clib_mem_create_heap (0 /* DIY */ , tm->numa_heap_size,
                                            1 /* is_locked */ ,
                                            "numa %u heap", w->numa_id);
          clib_mem_set_default_numa_affinity ();
          mm->per_numa_mheaps[w->numa_id] = numa_heap;
        }
      else
        {
          /* Or, use the main heap */
          mm->per_numa_mheaps[w->numa_id] = w->thread_mheap;
        }
    }

  CPU_ZERO (&cpuset);
  CPU_SET (cpu_id, &cpuset);

  if (pthread_attr_init (&attr))
    return clib_error_return_unix (0, "pthread_attr_init");

  if (pthread_attr_setstack (&attr, w->thread_stack,
                             VLIB_THREAD_STACK_SIZE))
    return clib_error_return_unix (0, "pthread_attr_setstack");

  if (pthread_create (&worker, &attr, fp_arg, (void *) w))
    return clib_error_return_unix (0, "pthread_create");

  if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
    return clib_error_return_unix (0, "pthread_setaffinity_np");

  if (pthread_attr_destroy (&attr))
    return clib_error_return_unix (0, "pthread_attr_destroy");

  return 0;
}

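/*
 * Create all configured worker threads. For each registration that clones
 * data structures, fork a vlib_main_t (nodes, next frames, error counters)
 * into the worker's heap; then launch every thread and run the
 * num-workers-change init functions under an initial barrier.
 */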
static clib_error_t *
start_workers (vlib_main_t * vm)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  vlib_main_t *fvm = vlib_get_first_main ();
  int i, j;
  vlib_worker_thread_t *w;
  vlib_main_t *vm_clone;
  void *oldheap;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_thread_registration_t *tr;
  vlib_node_runtime_t *rt;
  u32 n_vlib_mains = tm->n_vlib_mains;
  u32 worker_thread_index;
  u32 stats_err_entry_index = fvm->error_main.stats_err_entry_index;
  clib_mem_heap_t *main_heap = clib_mem_get_per_cpu_heap ();
  vlib_stats_register_mem_heap (main_heap);

  vec_reset_length (vlib_worker_threads);

  /* Set up the main thread */
  vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
  w->elog_track.name = "main thread";
  elog_track_register (vlib_get_elog_main (), &w->elog_track);

  if (vec_len (tm->thread_prefix))
    {
      w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  vgm->elog_main.lock =
    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  vgm->elog_main.lock[0] = 0;

  clib_callback_data_init (&vm->vlib_node_runtime_perf_callbacks,
                           &vm->worker_thread_main_loop_callback_lock);

  vec_validate_aligned (vgm->vlib_mains, n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  vec_set_len (vgm->vlib_mains, 0);
  vec_add1_aligned (vgm->vlib_mains, vm, CLIB_CACHE_LINE_BYTES);

  if (n_vlib_mains > 1)
    {
      vlib_worker_threads->wait_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->workers_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->node_reforks_required =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      /* We'll need the rpc vector lock... */
      clib_spinlock_init (&vm->pending_rpc_lock);

      /* Ask for an initial barrier sync */
      *vlib_worker_threads->workers_at_barrier = 0;
      *vlib_worker_threads->wait_at_barrier = 1;

      /* Without update or refork */
      *vlib_worker_threads->node_reforks_required = 0;
      vgm->need_vlib_worker_thread_node_runtime_update = 0;

      /* init timing */
      vm->barrier_epoch = 0;
      vm->barrier_no_close_before = 0;

      worker_thread_index = 1;
      clib_spinlock_init (&vm->worker_thread_main_loop_callback_lock);

      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          vlib_node_main_t *nm, *nm_clone;
          int k;

          tr = tm->registrations[i];

          if (tr->count == 0)
            continue;

          for (k = 0; k < tr->count; k++)
            {
              vlib_node_t *n;
              u64 **c;

              vec_add2 (vlib_worker_threads, w, 1);
              /* Currently unused, may not really work */
              if (tr->mheap_size)
                w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
                                                        /* unlocked */ 0,
                                                        "%s%d heap",
                                                        tr->name, k);
              else
                w->thread_mheap = main_heap;

              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = k;
              w->registration = tr;

              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, k + 1);
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (vlib_get_elog_main (), &w->elog_track);

              if (tr->no_data_structure_clone)
                continue;

              /* Fork vlib_global_main et al. Look for bugs here */
              oldheap = clib_mem_set_heap (w->thread_mheap);

              vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
                                                 CLIB_CACHE_LINE_BYTES);
              clib_memcpy (vm_clone, vlib_get_first_main (),
                           sizeof (*vm_clone));

              vm_clone->thread_index = worker_thread_index;
              vm_clone->pending_rpc_requests = 0;
              vec_validate (vm_clone->pending_rpc_requests, 0);
              vec_set_len (vm_clone->pending_rpc_requests, 0);
              clib_memset (&vm_clone->random_buffer, 0,
                           sizeof (vm_clone->random_buffer));
              clib_spinlock_init
                (&vm_clone->worker_thread_main_loop_callback_lock);
              clib_callback_data_init
                (&vm_clone->vlib_node_runtime_perf_callbacks,
                 &vm_clone->worker_thread_main_loop_callback_lock);

              nm = &vlib_get_first_main ()->node_main;
              nm_clone = &vm_clone->node_main;
              /* fork next frames array, preserving node runtime indices */
              nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                                       CLIB_CACHE_LINE_BYTES);
              for (j = 0; j < vec_len (nm_clone->next_frames); j++)
                {
                  vlib_next_frame_t *nf = &nm_clone->next_frames[j];
                  u32 save_node_runtime_index;
                  u32 save_flags;

                  save_node_runtime_index = nf->node_runtime_index;
                  save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
                  vlib_next_frame_init (nf);
                  nf->node_runtime_index = save_node_runtime_index;
                  nf->flags = save_flags;
                }

              /* fork the frame dispatch queue */
              nm_clone->pending_frames = 0;
              vec_validate (nm_clone->pending_frames, 10);
              vec_set_len (nm_clone->pending_frames, 0);

              /* fork nodes */
              nm_clone->nodes = 0;

              /* Allocate all nodes in single block for speed */
              n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));

              for (j = 0; j < vec_len (nm->nodes); j++)
                {
                  clib_memcpy (n, nm->nodes[j], sizeof (*n));
                  /* none of the copied nodes have enqueue rights given out */
                  n->owner_node_index = VLIB_INVALID_NODE_INDEX;
                  clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
                  clib_memset (&n->stats_last_clear, 0,
                               sizeof (n->stats_last_clear));
                  vec_add1 (nm_clone->nodes, n);
                  n++;
                }
              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              clib_interrupt_init (
                &nm_clone->input_node_interrupts,
                vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
              clib_interrupt_init (
                &nm_clone->pre_input_node_interrupts,
                vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT]));
              vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->processes = vec_dup_aligned (nm->processes,
                                                     CLIB_CACHE_LINE_BYTES);

              /* Create per-thread frame freelist */
              nm_clone->frame_sizes = 0;
              nm_clone->node_by_error = nm->node_by_error;

              /* Packet trace buffers are guaranteed to be empty, nothing to do here */

              clib_mem_set_heap (oldheap);
              vec_add1_aligned (vgm->vlib_mains, vm_clone,
                                CLIB_CACHE_LINE_BYTES);

              /* Switch to the stats segment ... */
              vlib_stats_validate (stats_err_entry_index, worker_thread_index,
                                   vec_len (fvm->error_main.counters) - 1);
              c = vlib_stats_get_entry_data_pointer (stats_err_entry_index);
              vm_clone->error_main.counters = c[worker_thread_index];

              vm_clone->error_main.counters_last_clear = vec_dup_aligned (
                vlib_get_first_main ()->error_main.counters_last_clear,
                CLIB_CACHE_LINE_BYTES);

              worker_thread_index++;
            }
        }
    }
  else
    {
      /* only have non-data-structure copy threads to create... */
      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          tr = tm->registrations[i];

          for (j = 0; j < tr->count; j++)
            {
              vec_add2 (vlib_worker_threads, w, 1);
              if (tr->mheap_size)
                {
                  w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
                                                          /* locked */ 0,
                                                          "%s%d heap",
                                                          tr->name, j);
                }
              else
                w->thread_mheap = main_heap;
              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = j;
              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, j + 1);
              w->registration = tr;
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (vlib_get_elog_main (), &w->elog_track);
            }
        }
    }

  worker_thread_index = 1;

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      clib_error_t *err;
      int j;

      tr = tm->registrations[i];

      if (tr->use_pthreads || tm->use_pthreads)
        {
          for (j = 0; j < tr->count; j++)
            {
              w = vlib_worker_threads + worker_thread_index++;
              err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                            w, 0);
              if (err)
                clib_unix_error ("%U, thread %s init on cpu %d failed",
                                 format_clib_error, err, tr->name, 0);
            }
        }
      else
        {
          uword c;
          clib_bitmap_foreach (c, tr->coremask)
            {
              w = vlib_worker_threads + worker_thread_index++;
              err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                            w, c);
              if (err)
                clib_unix_error ("%U, thread %s init on cpu %d failed",
                                 format_clib_error, err, tr->name, c);
            }
        }
    }
  vlib_worker_thread_barrier_sync (vm);
  {
    clib_error_t *err;
    err = vlib_call_init_exit_functions (
      vm, &vgm->num_workers_change_function_registrations, 1 /* call_once */,
      1 /* is_global */);
    if (err)
      clib_error_report (err);
  }
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);

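/*
 * With the barrier closed, scrape per-node runtime counters from every
 * thread into the main-thread node structures so no statistics are lost
 * before the per-worker clones are rebuilt.
 */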
static inline void
worker_thread_node_runtime_update_internal (void)
{
  int i, j;
  vlib_main_t *vm;
  vlib_node_main_t *nm, *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;

  ASSERT (vlib_get_thread_index () == 0);

  vm = vlib_get_first_main ();
  nm = &vm->node_main;

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

  /*
   * Scrape all runtime stats, so we don't lose node runtime(s) with
   * pending counts, or throw away worker / io thread counts.
   */
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *n;
      n = nm->nodes[j];
      vlib_node_sync_stats (vm, n);
    }

  for (i = 1; i < vlib_get_n_threads (); i++)
    {
      vlib_node_t *n;

      vm_clone = vlib_get_main_by_index (i);
      nm_clone = &vm_clone->node_main;

      for (j = 0; j < vec_len (nm_clone->nodes); j++)
        {
          n = nm_clone->nodes[j];

          rt = vlib_node_get_runtime (vm_clone, n->index);
          vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
        }
    }

  /* Per-worker clone rebuilds are now done on each thread */
}

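/*
 * Executed by each worker while the barrier is held: rebuild this
 * thread's clones of the node graph (nodes, runtimes, next frames, error
 * counters) from the main thread's copy, preserving per-node state,
 * flags and statistics accumulated so far.
 */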
void
vlib_worker_thread_node_refork (void)
{
  vlib_main_t *vm, *vm_clone;
  vlib_node_main_t *nm, *nm_clone;
  vlib_node_t **old_nodes_clone;
  vlib_node_runtime_t *rt, *old_rt;
  u64 **c;

  vlib_node_t *new_n_clone;

  int j;

  vm = vlib_get_first_main ();
  nm = &vm->node_main;
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

  /* Re-clone error heap */
  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;

  clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
                    sizeof (vm->error_main));
  j = vec_len (vm->error_main.counters) - 1;

  c = vlib_stats_get_entry_data_pointer (vm->error_main.stats_err_entry_index);
  vm_clone->error_main.counters = c[vm_clone->thread_index];

  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters_last_clear = old_counters_all_clear;

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
    {
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      if ((nf->flags & VLIB_FRAME_IS_ALLOCATED) && nf->frame != NULL)
        {
          vlib_frame_t *f = nf->frame;
          nf->frame = NULL;
          vlib_frame_free (vm_clone, f);
        }
    }

  vec_free (nm_clone->next_frames);
  nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                           CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
    {
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      u32 save_node_runtime_index;
      u32 save_flags;

      save_node_runtime_index = nf->node_runtime_index;
      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
      vlib_next_frame_init (nf);
      nf->node_runtime_index = save_node_runtime_index;
      nf->flags = save_flags;
    }

  old_nodes_clone = nm_clone->nodes;
  nm_clone->nodes = 0;

  /* re-fork nodes */

  /* Allocate all nodes in single block for speed */
  new_n_clone =
    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *new_n = nm->nodes[j];

      clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
      /* none of the copied nodes have enqueue rights given out */
      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;

      if (j >= vec_len (old_nodes_clone))
        {
          /* new node, set to zero */
          clib_memset (&new_n_clone->stats_total, 0,
                       sizeof (new_n_clone->stats_total));
          clib_memset (&new_n_clone->stats_last_clear, 0,
                       sizeof (new_n_clone->stats_last_clear));
        }
      else
        {
          vlib_node_t *old_n_clone = old_nodes_clone[j];
          /* Copy stats if the old data is valid */
          clib_memcpy_fast (&new_n_clone->stats_total,
                            &old_n_clone->stats_total,
                            sizeof (new_n_clone->stats_total));
          clib_memcpy_fast (&new_n_clone->stats_last_clear,
                            &old_n_clone->stats_last_clear,
                            sizeof (new_n_clone->stats_last_clear));

          /* keep previous node state */
          new_n_clone->state = old_n_clone->state;
          new_n_clone->flags = old_n_clone->flags;
        }
      vec_add1 (nm_clone->nodes, new_n_clone);
      new_n_clone++;
    }
  /* Free the old node clones */
  clib_mem_free (old_nodes_clone[0]);

  vec_free (old_nodes_clone);


  /* re-clone internal nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  /* re-clone input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                     CLIB_CACHE_LINE_BYTES);
  clib_interrupt_resize (
    &nm_clone->input_node_interrupts,
    vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
  clib_interrupt_resize (
    &nm_clone->pre_input_node_interrupts,
    vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT]));

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  /* re-clone pre-input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  vec_free (nm_clone->processes);
  nm_clone->processes = vec_dup_aligned (nm->processes,
                                         CLIB_CACHE_LINE_BYTES);
  nm_clone->node_by_error = nm->node_by_error;
}

void
vlib_worker_thread_node_runtime_update (void)
{
  /*
   * Make a note that we need to do a node runtime update
   * prior to releasing the barrier.
   */
  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
}

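/*
 * unformat function for the "scheduler-policy" startup option; maps the
 * textual policy names defined by foreach_sched_policy (e.g. "rr",
 * "fifo") to their SCHED_POLICY_* values.
 */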
Pavel Kotucek1e765832016-09-23 08:54:14 +02001149u32
1150unformat_sched_policy (unformat_input_t * input, va_list * args)
1151{
1152 u32 *r = va_arg (*args, u32 *);
1153
1154 if (0);
1155#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
1156 foreach_sched_policy
1157#undef _
1158 else
1159 return 0;
1160 return 1;
1161}
1162
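/*
 * Example (illustrative): unformat_sched_policy maps the tokens generated
 * by foreach_sched_policy onto SCHED_POLICY_* values, so a config line
 * such as
 *
 *   scheduler-policy fifo
 *
 * inside the "cpu" stanza sets tm->sched_policy accordingly (see
 * cpu_config below).
 */
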
Ed Warnickecb9cada2015-12-08 15:45:58 -07001163static clib_error_t *
1164cpu_config (vlib_main_t * vm, unformat_input_t * input)
1165{
1166 vlib_thread_registration_t *tr;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001167 uword *p;
1168 vlib_thread_main_t *tm = &vlib_thread_main;
1169 u8 *name;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001170 uword *bitmap;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001171 u32 count;
1172
1173 tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
Pavel Kotucek1e765832016-09-23 08:54:14 +02001174
Dave Barach9b8ffd92016-07-08 08:13:45 -04001175 tm->n_thread_stacks = 1; /* account for main thread */
Pavel Kotucek1e765832016-09-23 08:54:14 +02001176 tm->sched_policy = ~0;
1177 tm->sched_priority = ~0;
Damjan Marion858151f2018-07-11 10:51:00 +02001178 tm->main_lcore = ~0;
hsandid71c32a82024-03-25 17:51:31 +01001179 tm->use_main_core_auto = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001180
1181 tr = tm->next;
1182
1183 while (tr)
1184 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001185 hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001186 tr = tr->next;
1187 }
1188
Dave Barach9b8ffd92016-07-08 08:13:45 -04001189 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001190 {
Damjan Marionbf741472016-06-13 22:49:44 +02001191 if (unformat (input, "use-pthreads"))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001192 tm->use_pthreads = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001193 else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001194 ;
hsandid71c32a82024-03-25 17:51:31 +01001195 else if (unformat (input, "main-core auto"))
1196 tm->use_main_core_auto = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001197 else if (unformat (input, "main-core %u", &tm->main_lcore))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001198 ;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001199 else if (unformat (input, "skip-cores %u", &tm->skip_cores))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001200 ;
Dave Baracha690fdb2020-01-21 12:34:55 -05001201 else if (unformat (input, "numa-heap-size %U",
1202 unformat_memory_size, &tm->numa_heap_size))
1203 ;
Yi Hee4a9eb72018-07-17 14:18:41 +08001204 else if (unformat (input, "coremask-%s %U", &name,
1205 unformat_bitmap_mask, &bitmap) ||
1206 unformat (input, "corelist-%s %U", &name,
1207 unformat_bitmap_list, &bitmap))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001208 {
1209 p = hash_get_mem (tm->thread_registrations_by_name, name);
1210 if (p == 0)
1211 return clib_error_return (0, "no such thread type '%s'", name);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001212
Dave Barach9b8ffd92016-07-08 08:13:45 -04001213 tr = (vlib_thread_registration_t *) p[0];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001214
Dave Barach9b8ffd92016-07-08 08:13:45 -04001215 if (tr->use_pthreads)
1216 return clib_error_return (0,
1217 "corelist cannot be set for '%s' threads",
1218 name);
Vladimir Isaev18a4a372020-03-17 12:30:11 +03001219 if (tr->count)
1220 return clib_error_return
1221 (0, "core placement of '%s' threads is already configured",
1222 name);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001223
Dave Barach9b8ffd92016-07-08 08:13:45 -04001224 tr->coremask = bitmap;
1225 tr->count = clib_bitmap_count_set_bits (tr->coremask);
1226 }
Pavel Kotucek1e765832016-09-23 08:54:14 +02001227 else
1228 if (unformat
1229 (input, "scheduler-policy %U", unformat_sched_policy,
1230 &tm->sched_policy))
1231 ;
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001232 else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
Pavel Kotucek1e765832016-09-23 08:54:14 +02001233 ;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001234 else if (unformat (input, "%s %u", &name, &count))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001235 {
1236 p = hash_get_mem (tm->thread_registrations_by_name, name);
1237 if (p == 0)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001238 return clib_error_return (0, "no such thread type '%s'", name);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001239
1240 tr = (vlib_thread_registration_t *) p[0];
Vladimir Isaev18a4a372020-03-17 12:30:11 +03001241
Dave Barach9b8ffd92016-07-08 08:13:45 -04001242 if (tr->fixed_count)
1243 return clib_error_return
Vladimir Isaev18a4a372020-03-17 12:30:11 +03001244 (0, "number of '%s' threads not configurable", name);
1245 if (tr->count)
1246 return clib_error_return
1247 (0, "number of '%s' threads is already configured", name);
1248
Dave Barach9b8ffd92016-07-08 08:13:45 -04001249 tr->count = count;
1250 }
1251 else
1252 break;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001253 }
1254
hsandid71c32a82024-03-25 17:51:31 +01001255 if (tm->main_lcore != ~0 && tm->use_main_core_auto)
1256 {
1257 return clib_error_return (
1258 0, "cannot set both 'main-core %u' and 'main-core auto'",
1259 tm->main_lcore);
1260 }
1261
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001262 if (tm->sched_priority != ~0)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001263 {
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001264 if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001265 {
1266 u32 prio_max = sched_get_priority_max (tm->sched_policy);
1267 u32 prio_min = sched_get_priority_min (tm->sched_policy);
1268 if (tm->sched_priority > prio_max)
1269 tm->sched_priority = prio_max;
1270 if (tm->sched_priority < prio_min)
1271 tm->sched_priority = prio_min;
1272 }
1273 else
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001274 {
1275 return clib_error_return
1276 (0,
1277 "scheduling priority (%d) is not allowed for `normal` scheduling policy",
1278 tm->sched_priority);
1279 }
Pavel Kotucek1e765832016-09-23 08:54:14 +02001280 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001281 tr = tm->next;
1282
1283 if (!tm->thread_prefix)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001284 tm->thread_prefix = format (0, "vpp");
Ed Warnickecb9cada2015-12-08 15:45:58 -07001285
1286 while (tr)
1287 {
1288 tm->n_thread_stacks += tr->count;
1289 tm->n_pthreads += tr->count * tr->use_pthreads;
Damjan Marion878c6092017-01-04 13:19:27 +01001290 tm->n_threads += tr->count * (tr->use_pthreads == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001291 tr = tr->next;
1292 }
1293
1294 return 0;
1295}
1296
1297VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
1298
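/*
 * A minimal startup.conf stanza accepted by cpu_config() above (core
 * numbers and priorities are illustrative):
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-3,18-19
 *     scheduler-policy fifo
 *     scheduler-priority 50
 *   }
 *
 * "corelist-workers" resolves against the "workers" thread registration
 * (see VLIB_REGISTER_THREAD below); "main-core auto" may be given instead
 * of an explicit main core, but not both.
 */
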
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001299 /*
1300 * Enforce minimum open time to minimize packet loss due to Rx overflow,
1301 * based on a test-based heuristic that the barrier should be open for at
1302 * least 3 times as long as it is closed (with an upper bound of 1ms, because
1303 * by that point it is probably too late to make a difference)
1304 */
1305
1306#ifndef BARRIER_MINIMUM_OPEN_LIMIT
1307#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
1308#endif
1309
1310#ifndef BARRIER_MINIMUM_OPEN_FACTOR
1311#define BARRIER_MINIMUM_OPEN_FACTOR 3
1312#endif
1313
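/*
 * Sketch of how these limits are applied (see
 * vlib_worker_thread_barrier_release() below): after the barrier has been
 * closed for t_closed_total seconds, the earliest next closure is
 *
 *   minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
 *   if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
 *     minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
 *   vm->barrier_no_close_before = now + minimum_open;
 */
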
Dave Barach9b8ffd92016-07-08 08:13:45 -04001314void
Dave Barachc602b382019-06-03 19:48:22 -04001315vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
1316{
1317 f64 deadline;
1318 f64 now = vlib_time_now (vm);
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001319 u32 count = vlib_get_n_threads () - 1;
Dave Barachc602b382019-06-03 19:48:22 -04001320
1321 /* No worker threads? */
1322 if (count == 0)
1323 return;
1324
1325 deadline = now + BARRIER_SYNC_TIMEOUT;
1326 *vlib_worker_threads->wait_at_barrier = 1;
1327 while (*vlib_worker_threads->workers_at_barrier != count)
1328 {
1329 if ((now = vlib_time_now (vm)) > deadline)
1330 {
1331 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1332 os_panic ();
1333 }
1334 CLIB_PAUSE ();
1335 }
1336 *vlib_worker_threads->wait_at_barrier = 0;
1337}
1338
Neale Ranns42845dd2020-05-26 13:12:17 +00001339/**
1340 * Return true if the worker thread barrier is held
1341 */
1342u8
1343vlib_worker_thread_barrier_held (void)
1344{
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001345 if (vlib_get_n_threads () < 2)
Neale Ranns42845dd2020-05-26 13:12:17 +00001346 return (1);
1347
1348 return (*vlib_worker_threads->wait_at_barrier == 1);
1349}
1350
Dave Barachc602b382019-06-03 19:48:22 -04001351void
Damjan Marion8343ee52019-02-26 17:15:48 +01001352vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001353{
1354 f64 deadline;
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001355 f64 now;
1356 f64 t_entry;
1357 f64 t_open;
1358 f64 t_closed;
Dave Barach9ae190e2019-04-23 10:07:24 -04001359 f64 max_vector_rate;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001360 u32 count;
Dave Barach9ae190e2019-04-23 10:07:24 -04001361 int i;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001362
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001363 if (vlib_get_n_threads () < 2)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001364 return;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001365
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001366 ASSERT (vlib_get_thread_index () == 0);
1367
Damjan Marion8343ee52019-02-26 17:15:48 +01001368 vlib_worker_threads[0].barrier_caller = func_name;
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001369 count = vlib_get_n_threads () - 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001370
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001371 /* Record entry relative to last close */
1372 now = vlib_time_now (vm);
1373 t_entry = now - vm->barrier_epoch;
1374
Ed Warnickecb9cada2015-12-08 15:45:58 -07001375 /* Tolerate recursive calls */
1376 if (++vlib_worker_threads[0].recursion_level > 1)
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001377 {
1378 barrier_trace_sync_rec (t_entry);
1379 return;
1380 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001381
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001382 if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
1383 clib_call_callbacks (vm->barrier_perf_callbacks, vm,
1384 vm->clib_time.last_cpu_time, 0 /* enter */ );
1385
Dave Barach9ae190e2019-04-23 10:07:24 -04001386 /*
1387 * Need data to decide if we're working hard enough to honor
1388 * the barrier hold-down timer.
1389 */
1390 max_vector_rate = 0.0;
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001391 for (i = 1; i < vlib_get_n_threads (); i++)
1392 {
1393 vlib_main_t *ovm = vlib_get_main_by_index (i);
1394 max_vector_rate = clib_max (max_vector_rate,
1395 (f64) vlib_last_vectors_per_main_loop (ovm));
1396 }
Dave Barach9ae190e2019-04-23 10:07:24 -04001397
Bud Grise42f20062016-03-16 13:09:46 -04001398 vlib_worker_threads[0].barrier_sync_count++;
1399
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001400 /* Enforce minimum barrier open time to minimize packet loss */
1401 ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001402
Dave Barach9ae190e2019-04-23 10:07:24 -04001403 /*
1404 * If any worker thread seems busy, which we define
1405 * as a vector rate above 10, we enforce the barrier hold-down timer
1406 */
1407 if (max_vector_rate > 10.0)
Dave Barach36feebb2018-09-07 11:12:27 -04001408 {
Dave Barach9ae190e2019-04-23 10:07:24 -04001409 while (1)
Dave Barach36feebb2018-09-07 11:12:27 -04001410 {
Dave Barach9ae190e2019-04-23 10:07:24 -04001411 now = vlib_time_now (vm);
1412 /* Barrier hold-down timer expired? */
1413 if (now >= vm->barrier_no_close_before)
1414 break;
1415 if ((vm->barrier_no_close_before - now)
1416 > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
1417 {
1418 clib_warning
1419 ("clock change: would have waited for %.4f seconds",
1420 (vm->barrier_no_close_before - now));
1421 break;
1422 }
Dave Barach36feebb2018-09-07 11:12:27 -04001423 }
1424 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001425 /* Record time of closure */
1426 t_open = now - vm->barrier_epoch;
1427 vm->barrier_epoch = now;
1428
1429 deadline = now + BARRIER_SYNC_TIMEOUT;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001430
1431 *vlib_worker_threads->wait_at_barrier = 1;
1432 while (*vlib_worker_threads->workers_at_barrier != count)
1433 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001434 if ((now = vlib_time_now (vm)) > deadline)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001435 {
1436 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1437 os_panic ();
1438 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001439 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001440
1441 t_closed = now - vm->barrier_epoch;
1442
1443 barrier_trace_sync (t_entry, t_open, t_closed);
1444
Ed Warnickecb9cada2015-12-08 15:45:58 -07001445}
1446
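/*
 * Callers normally reach the sync entry point above through the
 * vlib_worker_thread_barrier_sync() macro in <vlib/threads.h>, which
 * supplies __FUNCTION__ as func_name. Typical main-thread pattern for
 * mutating state shared with workers (sketch):
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... update data structures the workers read ...
 *   vlib_worker_thread_barrier_release (vm);
 */
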
Dave Barach9b8ffd92016-07-08 08:13:45 -04001447void
1448vlib_worker_thread_barrier_release (vlib_main_t * vm)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001449{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001450 vlib_global_main_t *vgm = vlib_get_global_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001451 f64 deadline;
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001452 f64 now;
1453 f64 minimum_open;
1454 f64 t_entry;
1455 f64 t_closed_total;
1456 f64 t_update_main = 0.0;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001457 int refork_needed = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001458
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001459 if (vlib_get_n_threads () < 2)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001460 return;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001461
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001462 ASSERT (vlib_get_thread_index () == 0);
1463
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001464
1465 now = vlib_time_now (vm);
1466 t_entry = now - vm->barrier_epoch;
1467
Ed Warnickecb9cada2015-12-08 15:45:58 -07001468 if (--vlib_worker_threads[0].recursion_level > 0)
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001469 {
1470 barrier_trace_release_rec (t_entry);
1471 return;
1472 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001473
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001474 /* Update (all) node runtimes before releasing the barrier, if needed */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001475 if (vgm->need_vlib_worker_thread_node_runtime_update)
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001476 {
Dave Barach1ddbc012018-06-13 09:26:05 -04001477 /*
1478 * Lock the stat segment here, so we're safe when
1479 * rebuilding the stat segment node clones from the
1480 * stat thread...
1481 */
Damjan Marion8973b072022-03-01 15:51:18 +01001482 vlib_stats_segment_lock ();
Dave Barach1ddbc012018-06-13 09:26:05 -04001483
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001484 /* Do stats elements on main thread */
1485 worker_thread_node_runtime_update_internal ();
Damjan Marionfd8deb42021-03-06 12:26:28 +01001486 vgm->need_vlib_worker_thread_node_runtime_update = 0;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001487
1488 /* Do per thread rebuilds in parallel */
1489 refork_needed = 1;
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001490 clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001491 (vlib_get_n_threads () - 1));
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001492 now = vlib_time_now (vm);
1493 t_update_main = now - vm->barrier_epoch;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001494 }
1495
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001496 deadline = now + BARRIER_SYNC_TIMEOUT;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001497
Dave Baracha4324a92019-02-19 17:05:30 -05001498 /*
1499 * Note when we let go of the barrier.
1500 * Workers can use this to derive a reasonably accurate
1501 * time offset. See vlib_time_now(...)
1502 */
1503 vm->time_last_barrier_release = vlib_time_now (vm);
1504 CLIB_MEMORY_STORE_BARRIER ();
1505
Ed Warnickecb9cada2015-12-08 15:45:58 -07001506 *vlib_worker_threads->wait_at_barrier = 0;
1507
1508 while (*vlib_worker_threads->workers_at_barrier > 0)
1509 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001510 if ((now = vlib_time_now (vm)) > deadline)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001511 {
1512 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1513 os_panic ();
1514 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001515 }
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001516
1517 /* Wait for reforks before continuing */
1518 if (refork_needed)
1519 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001520 now = vlib_time_now (vm);
1521
1522 deadline = now + BARRIER_SYNC_TIMEOUT;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001523
1524 while (*vlib_worker_threads->node_reforks_required > 0)
1525 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001526 if ((now = vlib_time_now (vm)) > deadline)
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001527 {
1528 fformat (stderr, "%s: worker thread refork deadlock\n",
1529 __FUNCTION__);
1530 os_panic ();
1531 }
1532 }
Damjan Marion8973b072022-03-01 15:51:18 +01001533 vlib_stats_segment_unlock ();
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001534 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001535
1536 t_closed_total = now - vm->barrier_epoch;
1537
1538 minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
1539
1540 if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
1541 {
1542 minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
1543 }
1544
1545 vm->barrier_no_close_before = now + minimum_open;
1546
1547 /* Record barrier epoch (used to enforce minimum open time) */
1548 vm->barrier_epoch = now;
1549
1550 barrier_trace_release (t_entry, t_closed_total, t_update_main);
1551
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001552 if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
1553 clib_call_callbacks (vm->barrier_perf_callbacks, vm,
1554 vm->clib_time.last_cpu_time, 1 /* leave */ );
Ed Warnickecb9cada2015-12-08 15:45:58 -07001555}
1556
Florin Coras4b208302022-03-30 13:50:19 -07001557static void
1558vlib_worker_sync_rpc (void *args)
1559{
1560 ASSERT (vlib_thread_is_main_w_barrier ());
1561 vlib_worker_threads->wait_before_barrier = 0;
1562}
1563
1564void
1565vlib_workers_sync (void)
1566{
1567 if (PREDICT_FALSE (!vlib_num_workers ()))
1568 return;
1569
1570 if (!(*vlib_worker_threads->wait_at_barrier) &&
1571 !clib_atomic_swap_rel_n (&vlib_worker_threads->wait_before_barrier, 1))
1572 {
1573 u32 thread_index = vlib_get_thread_index ();
1574 vlib_rpc_call_main_thread (vlib_worker_sync_rpc, (u8 *) &thread_index,
1575 sizeof (thread_index));
Florin Corase060b0a2024-02-01 21:13:10 -08001576 vlib_worker_flush_pending_rpc_requests (vlib_get_main ());
Florin Coras4b208302022-03-30 13:50:19 -07001577 }
1578
1579 /* Wait until main thread asks for barrier */
1580 while (!(*vlib_worker_threads->wait_at_barrier))
1581 ;
1582
1583 /* Stop before the barrier and make sure all other threads are either
1584 * at the worker barrier or at this pre-barrier sync point */
1585 clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, 1);
1586 while (vlib_num_workers () > (*vlib_worker_threads->workers_at_barrier +
1587 vlib_worker_threads->workers_before_barrier))
1588 ;
1589}
1590
1591void
1592vlib_workers_continue (void)
1593{
1594 if (PREDICT_FALSE (!vlib_num_workers ()))
1595 return;
1596
1597 clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, 1);
1598
1599 /* Wait until all workers are done with work before barrier */
1600 while (vlib_worker_threads->done_work_before_barrier <
1601 vlib_worker_threads->workers_before_barrier)
1602 ;
1603
1604 clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, -1);
1605 clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, -1);
1606}
1607
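/*
 * These two helpers let a worker thread run a short critical section with
 * all other workers parked, without itself owning the main-thread barrier
 * (sketch, from worker context):
 *
 *   vlib_workers_sync ();
 *   ... critical section: every other worker is held ...
 *   vlib_workers_continue ();
 */
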
Neale Ranns42845dd2020-05-26 13:12:17 +00001608/**
1609 * Wait until each of the workers has been once around the track
1610 */
1611void
1612vlib_worker_wait_one_loop (void)
1613{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001614 vlib_global_main_t *vgm = vlib_get_global_main ();
Neale Ranns42845dd2020-05-26 13:12:17 +00001615 ASSERT (vlib_get_thread_index () == 0);
1616
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001617 if (vlib_get_n_threads () < 2)
Neale Ranns42845dd2020-05-26 13:12:17 +00001618 return;
1619
1620 if (vlib_worker_thread_barrier_held ())
1621 return;
1622
1623 u32 *counts = 0;
1624 u32 ii;
1625
Damjan Marion6ffb7c62021-03-26 13:06:13 +01001626 vec_validate (counts, vlib_get_n_threads () - 1);
Neale Ranns42845dd2020-05-26 13:12:17 +00001627
1628 /* record the current loop counts */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001629 vec_foreach_index (ii, vgm->vlib_mains)
1630 counts[ii] = vgm->vlib_mains[ii]->main_loop_count;
Neale Ranns42845dd2020-05-26 13:12:17 +00001631
1632 /* spin until each worker's loop count changes; skip the main thread,
1633 * since it is the one spinning here and would keep us a while */
1634 for (ii = 1; ii < vec_len (counts); ii++)
1635 {
Damjan Marionfd8deb42021-03-06 12:26:28 +01001636 while (counts[ii] == vgm->vlib_mains[ii]->main_loop_count)
Neale Ranns42845dd2020-05-26 13:12:17 +00001637 CLIB_PAUSE ();
1638 }
1639
1640 vec_free (counts);
1641 return;
1642}
1643
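/*
 * Example use (illustrative): publish a new lock-free table pointer, then
 * wait until every worker has been once around its main loop before
 * freeing the old one:
 *
 *   old = my_main.table;
 *   my_main.table = new_table;
 *   vlib_worker_wait_one_loop ();
 *   vec_free (old);
 */
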
Dave Barach9b8ffd92016-07-08 08:13:45 -04001644void
Florin Coras4cadd3b2024-02-01 20:46:15 -08001645vlib_worker_flush_pending_rpc_requests (vlib_main_t *vm)
1646{
1647 vlib_main_t *vm_global = vlib_get_first_main ();
1648
1649 ASSERT (vm != vm_global);
1650
1651 clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock);
1652 vec_append (vm_global->pending_rpc_requests, vm->pending_rpc_requests);
1653 vec_reset_length (vm->pending_rpc_requests);
1654 clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock);
1655}
1656
1657void
Dave Barach9b8ffd92016-07-08 08:13:45 -04001658vlib_worker_thread_fn (void *arg)
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001659{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001660 vlib_global_main_t *vgm = vlib_get_global_main ();
Dave Barach9b8ffd92016-07-08 08:13:45 -04001661 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001662 vlib_main_t *vm = vlib_get_main ();
Damjan Marione9f929b2017-03-16 11:32:09 +01001663 clib_error_t *e;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001664
Damjan Marion586afd72017-04-05 19:18:20 +02001665 ASSERT (vm->thread_index == vlib_get_thread_index ());
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001666
1667 vlib_worker_thread_init (w);
1668 clib_time_init (&vm->clib_time);
1669 clib_mem_set_heap (w->thread_mheap);
1670
Damjan Marionfd8deb42021-03-06 12:26:28 +01001671 vm->worker_init_functions_called = hash_create (0, 0);
1672
1673 e = vlib_call_init_exit_functions_no_sort (
1674 vm, &vgm->worker_init_function_registrations, 1 /* call_once */,
1675 0 /* is_global */);
Damjan Marione9f929b2017-03-16 11:32:09 +01001676 if (e)
1677 clib_error_report (e);
1678
Damjan Marione9d52d52017-03-09 15:42:26 +01001679 vlib_worker_loop (vm);
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001680}
1681
1682VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
1683 .name = "workers",
1684 .short_name = "wk",
1685 .function = vlib_worker_thread_fn,
1686};
1687
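/*
 * Plugins can register additional thread types the same way (sketch,
 * names illustrative); cpu_config() above then accepts "my-threads <n>"
 * or "corelist-my-threads <list>" in the cpu stanza:
 *
 *   VLIB_REGISTER_THREAD (my_thread_reg, static) = {
 *     .name = "my-threads",
 *     .short_name = "mt",
 *     .function = my_thread_fn,
 *   };
 */
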
Mohammed Hawarie7149262022-05-18 10:08:47 +02001688extern clib_march_fn_registration
1689 *vlib_frame_queue_dequeue_with_aux_fn_march_fn_registrations;
1690extern clib_march_fn_registration
1691 *vlib_frame_queue_dequeue_fn_march_fn_registrations;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001692u32
1693vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
1694{
1695 vlib_thread_main_t *tm = vlib_get_thread_main ();
Mohammed Hawarie7149262022-05-18 10:08:47 +02001696 vlib_main_t *vm = vlib_get_main ();
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001697 vlib_frame_queue_main_t *fqm;
1698 vlib_frame_queue_t *fq;
Mohammed Hawarie7149262022-05-18 10:08:47 +02001699 vlib_node_t *node;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001700 int i;
Elias Rudberg368104d2020-04-16 16:01:52 +02001701 u32 num_threads;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001702
1703 if (frame_queue_nelts == 0)
dongjuan88752482019-06-04 10:59:02 +08001704 frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001705
Elias Rudberg368104d2020-04-16 16:01:52 +02001706 num_threads = 1 /* main thread */ + tm->n_threads;
1707 ASSERT (frame_queue_nelts >= 8 + num_threads);
Damjan Marion78fd7e82018-07-20 18:47:05 +02001708
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001709 vec_add2 (tm->frame_queue_mains, fqm, 1);
1710
Mohammed Hawarie7149262022-05-18 10:08:47 +02001711 node = vlib_get_node (vm, fqm->node_index);
1712 ASSERT (node);
1713 if (node->aux_offset)
1714 {
1715 fqm->frame_queue_dequeue_fn =
1716 CLIB_MARCH_FN_VOID_POINTER (vlib_frame_queue_dequeue_with_aux_fn);
1717 }
1718 else
1719 {
1720 fqm->frame_queue_dequeue_fn =
1721 CLIB_MARCH_FN_VOID_POINTER (vlib_frame_queue_dequeue_fn);
1722 }
1723
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001724 fqm->node_index = node_index;
Damjan Marion78fd7e82018-07-20 18:47:05 +02001725 fqm->frame_queue_nelts = frame_queue_nelts;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001726
1727 vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
Damjan Marion8bea5892022-04-04 22:40:45 +02001728 vec_set_len (fqm->vlib_frame_queues, 0);
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001729 for (i = 0; i < tm->n_vlib_mains; i++)
1730 {
1731 fq = vlib_frame_queue_alloc (frame_queue_nelts);
1732 vec_add1 (fqm->vlib_frame_queues, fq);
1733 }
1734
1735 return (fqm - tm->frame_queue_mains);
1736}
1737
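/*
 * Typical use from a feature init path (names illustrative): allocate a
 * frame queue main for a handoff node, keep the returned index, and use
 * it from the datapath, typically via vlib_buffer_enqueue_to_thread():
 *
 *   my_main.fq_index =
 *     vlib_frame_queue_main_init (my_handoff_node.index, 0);
 *
 * Passing 0 for frame_queue_nelts selects FRAME_QUEUE_MAX_NELTS, as above.
 */
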
Dave Barach69128d02017-09-26 10:54:34 -04001738void
1739vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
1740 args)
1741{
1742 ASSERT (vlib_get_thread_index () == 0);
1743 vlib_process_signal_event (vlib_get_main (), args->node_index,
1744 args->type_opaque, args->data);
1745}
1746
1747void *rpc_call_main_thread_cb_fn;
1748
1749void
1750vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
1751{
1752 if (rpc_call_main_thread_cb_fn)
1753 {
1754 void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
1755 (*fp) (callback, args, arg_size);
1756 }
1757 else
1758 clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
1759}
1760
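/*
 * Sketch of the worker-to-main signalling path built on this RPC hook,
 * using the helper above (node and event names illustrative):
 *
 *   vlib_process_signal_event_mt_args_t args = {
 *     .node_index = my_process_node.index,
 *     .type_opaque = MY_EVENT_TYPE,
 *     .data = 0,
 *   };
 *   vlib_rpc_call_main_thread (vlib_process_signal_event_mt_helper,
 *                              (u8 *) &args, sizeof (args));
 */
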
Dave Barach9b8ffd92016-07-08 08:13:45 -04001761clib_error_t *
1762threads_init (vlib_main_t * vm)
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001763{
Benoît Ganne8b153de2021-12-09 18:24:21 +01001764 const vlib_thread_main_t *tm = vlib_get_thread_main ();
1765
1766 if (tm->main_lcore == ~0 && tm->n_vlib_mains > 1)
1767 return clib_error_return (0, "Configuration error, a main core must "
1768 "be specified when using worker threads");
1769
Ed Warnickecb9cada2015-12-08 15:45:58 -07001770 return 0;
1771}
1772
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001773VLIB_INIT_FUNCTION (threads_init);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001774
Dave Baracha4324a92019-02-19 17:05:30 -05001775static clib_error_t *
1776show_clock_command_fn (vlib_main_t * vm,
1777 unformat_input_t * input, vlib_cli_command_t * cmd)
1778{
Dave Barachc25048b2020-01-29 18:05:24 -05001779 int verbose = 0;
Dave Barach19718002020-03-11 10:31:36 -04001780 clib_timebase_t _tb, *tb = &_tb;
Dave Baracha4324a92019-02-19 17:05:30 -05001781
Dave Barachc25048b2020-01-29 18:05:24 -05001782 (void) unformat (input, "verbose %=", &verbose, 1);
Dave Baracha4324a92019-02-19 17:05:30 -05001783
Dave Barach19718002020-03-11 10:31:36 -04001784 clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE,
1785 &vm->clib_time);
1786
1787 vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time,
1788 verbose, format_clib_timebase_time,
1789 clib_timebase_now (tb));
Dave Baracha4324a92019-02-19 17:05:30 -05001790
Dave Baracha4324a92019-02-19 17:05:30 -05001791 vlib_cli_output (vm, "Time last barrier release %.9f",
1792 vm->time_last_barrier_release);
1793
Benoît Ganne56eccdb2021-08-20 09:18:31 +02001794 foreach_vlib_main ()
Dave Baracha4324a92019-02-19 17:05:30 -05001795 {
Benoît Ganne56eccdb2021-08-20 09:18:31 +02001796 vlib_cli_output (vm, "%d: %U", this_vlib_main->thread_index,
1797 format_clib_time, &this_vlib_main->clib_time, verbose);
Dave Barachc25048b2020-01-29 18:05:24 -05001798
Benoît Ganne56eccdb2021-08-20 09:18:31 +02001799 vlib_cli_output (vm, "Thread %d offset %.9f error %.9f",
1800 this_vlib_main->thread_index,
1801 this_vlib_main->time_offset,
1802 vm->time_last_barrier_release -
1803 this_vlib_main->time_last_barrier_release);
Dave Baracha4324a92019-02-19 17:05:30 -05001804 }
1805 return 0;
1806}
1807
Dave Baracha4324a92019-02-19 17:05:30 -05001808VLIB_CLI_COMMAND (f_command, static) =
1809{
1810 .path = "show clock",
1811 .short_help = "show clock",
1812 .function = show_clock_command_fn,
1813};
Dave Baracha4324a92019-02-19 17:05:30 -05001814
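/*
 * Example (illustrative): "show clock verbose" at the CLI prints the main
 * thread's clib_time state plus, for each thread, its time offset and the
 * error relative to the last barrier release.
 */
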
Dave Barachab1a50c2020-10-06 14:08:16 -04001815vlib_thread_main_t *
1816vlib_get_thread_main_not_inline (void)
1817{
1818 return vlib_get_thread_main ();
1819}
1820
Dave Barach9b8ffd92016-07-08 08:13:45 -04001821/*
1822 * fd.io coding-style-patch-verification: ON
1823 *
1824 * Local Variables:
1825 * eval: (c-set-style "gnu")
1826 * End:
1827 */