/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE

#include <signal.h>
#include <math.h>
#include <vppinfra/format.h>
#include <vppinfra/time_range.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>

#include <vlib/threads.h>
#include <vlib/unix/cj.h>

#include <vlib/stat_weak_inlines.h>

DECLARE_CJ_GLOBAL_LOG;


u32
vl (void *p)
{
  return vec_len (p);
}

vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;

/*
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks.  Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
 */
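
/*
 * The four helpers below emit compact elog records for barrier activity:
 * timings are converted from seconds to integer microseconds (the 1e6
 * scaling), the barrier count comes from
 * vlib_worker_threads[0].barrier_sync_count, and the caller name is
 * interned with elog_string().  All of them are gated on the
 * barrier_elog_enabled flag so the cost is negligible when tracing is off.
 */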

static inline void
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-trace-%s-#%d",
      .format_args = "T4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 caller, count, t_entry, t_open, t_closed;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
}

static inline void
barrier_trace_sync_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-syncrec-%s-#%d",
      .format_args = "T4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 caller, depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
}

static inline void
barrier_trace_release_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-relrrec-#%d",
      .format_args = "i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;
}

static inline void
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-rel-#%d-e%d-u%d-t%d",
      .format_args = "i4i4i4i4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 count, t_entry, t_update_main, t_closed_total;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
}

uword
os_get_nthreads (void)
{
  return vec_len (vlib_thread_stacks);
}

void
vlib_set_thread_name (char *name)
{
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  int rv;
  pthread_t thread = pthread_self ();

  if (thread)
    {
      rv = pthread_setname_np (thread, name);
      if (rv)
        clib_warning ("pthread_setname_np returned %d", rv);
    }
}

static int
sort_registrations_by_no_clone (void *a0, void *a1)
{
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
          - ((i32) ((*tr1)->no_data_structure_clone)));
}

static uword *
clib_sysfs_list_to_bitmap (char *filename)
{
  FILE *fp;
  uword *r = 0;

  fp = fopen (filename, "r");

  if (fp != NULL)
    {
      u8 *buffer = 0;
      vec_validate (buffer, 256 - 1);
      if (fgets ((char *) buffer, 256, fp))
        {
          unformat_input_t in;
          unformat_init_string (&in, (char *) buffer,
                                strlen ((char *) buffer));
          if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
            clib_warning ("unformat_bitmap_list failed");
          unformat_free (&in);
        }
      vec_free (buffer);
      fclose (fp);
    }
  return r;
}
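
/*
 * Illustration (not part of the code path): the sysfs "online" files use
 * the usual Linux CPU-list syntax, e.g. "0-3,8", which unformat_bitmap_list
 * turns into a bitmap with bits 0..3 and 8 set.
 */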


/* Called early in the init sequence */

clib_error_t *
vlib_thread_init (vlib_main_t * vm)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  u32 first_index = 1;
  u32 i;
  uword *avail_cpu;

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
  tm->cpu_socket_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);

  /* skip cores */
  for (i = 0; i < tm->skip_cores; i++)
    {
      uword c = clib_bitmap_first_set (avail_cpu);
      if (c == ~0)
        return clib_error_return (0, "no available cpus to skip");

      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
    }

  /* grab cpu for main thread */
  if (tm->main_lcore == ~0)
    {
      /* if main-lcore is not set, we try to use lcore 1 */
      if (clib_bitmap_get (avail_cpu, 1))
        tm->main_lcore = 1;
      else
        tm->main_lcore = clib_bitmap_first_set (avail_cpu);
      if (tm->main_lcore == (u8) ~ 0)
        return clib_error_return (0, "no available cpus to be used for the"
                                  " main thread");
    }
  else
    {
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
        return clib_error_return (0, "cpu %u is not available to be used"
                                  " for the main thread", tm->main_lcore);
    }
  avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore */
  if (tm->cb.vlib_thread_set_lcore_cb)
    {
      tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
    }
  else
    {
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (tm->main_lcore, &cpuset);
      pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
    }

  /* Set up thread 0 */
  vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
  _vec_len (vlib_worker_threads) = 1;
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  vlib_get_thread_core_numa (w, w->cpu_id);

  if (tm->sched_policy != ~0)
    {
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
        {
          if (tm->sched_priority != ~0)
            sched_param.sched_priority = tm->sched_priority;
          sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
        }
    }

  /* assign threads to cores and set n_vlib_mains */
  tr = tm->next;

  while (tr)
    {
      vec_add1 (tm->registrations, tr);
      tr = tr->next;
    }

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      int j;
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
        continue;

      if (tr->coremask)
        {
          uword c;
          /* *INDENT-OFF* */
          clib_bitmap_foreach (c, tr->coremask, ({
            if (clib_bitmap_get(avail_cpu, c) == 0)
              return clib_error_return (0, "cpu %u is not available to be used"
                                        " for the '%s' thread",c, tr->name);

            avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
          }));
          /* *INDENT-ON* */
        }
      else
        {
          for (j = 0; j < tr->count; j++)
            {
              /* Do not use CPU 0 by default - leave it to the host and IRQs */
              uword avail_c0 = clib_bitmap_get (avail_cpu, 0);
              avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);

              uword c = clib_bitmap_first_set (avail_cpu);
              /* Use CPU 0 as a last resort */
              if (c == ~0 && avail_c0)
                {
                  c = 0;
                  avail_c0 = 0;
                }

              if (c == ~0)
                return clib_error_return (0,
                                          "no available cpus to be used for"
                                          " the '%s' thread", tr->name);

              avail_cpu = clib_bitmap_set (avail_cpu, 0, avail_c0);
              avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
              tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
            }
        }
    }

  clib_bitmap_free (avail_cpu);

  tm->n_vlib_mains = n_vlib_mains;

  /*
   * Allocate the remaining worker threads and thread stack vector slots;
   * from now on, calls to os_get_nthreads() will return the correct
   * answer.
   */
  vec_validate_aligned (vlib_worker_threads, first_index - 1,
                        CLIB_CACHE_LINE_BYTES);
  vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);
  return 0;
}
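
/*
 * Summary of the placement policy above: "skip-cores" removes the first N
 * online CPUs from consideration, the main thread prefers lcore 1 when
 * "main-core" is not configured, and worker placement avoids CPU 0 unless
 * it is the only CPU left.  A typical startup.conf stanza (shown purely as
 * an illustration; thread-type names such as "workers" are registered
 * elsewhere) might look like:
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-5
 *   }
 */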

vlib_frame_queue_t *
vlib_frame_queue_alloc (int nelts)
{
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  clib_memset (fq, 0, sizeof (*fq));
  fq->nelts = nelts;
  fq->vector_threshold = 128;	// packets
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (1)
    {
      if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->tail unaligned\n");
      if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->head unaligned\n");
      if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
        fformat (stderr, "WARNING: fq->elts unaligned\n");

      if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
        fformat (stderr, "WARNING: fq->elts[0] size %d\n",
                 sizeof (fq->elts[0]));
      if (nelts & (nelts - 1))
        {
          fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
          abort ();
        }
    }

  return (fq);
}
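
/*
 * The power-of-2 check above is what allows the ring index arithmetic in
 * the enqueue/dequeue paths to use a simple mask, e.g.
 *   elt = fq->elts + (tail & (fq->nelts - 1));
 * instead of a modulo operation.
 */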

void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
void
vl_msg_api_handler_no_free (void *v)
{
}

/* Turned off, save as reference material... */
#if 0
static inline int
vlib_frame_queue_dequeue_internal (int thread_id,
                                   vlib_main_t * vm, vlib_node_main_t * nm)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
  vlib_frame_queue_elt_t *elt;
  vlib_frame_t *f;
  vlib_pending_frame_t *p;
  vlib_node_runtime_t *r;
  u32 node_runtime_index;
  int msg_type;
  u64 before;
  int processed = 0;

  ASSERT (vm == vlib_mains[thread_id]);

  while (1)
    {
      if (fq->head == fq->tail)
        return processed;

      elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));

      if (!elt->valid)
        return processed;

      before = clib_cpu_time_now ();

      f = elt->frame;
      node_runtime_index = elt->node_runtime_index;
      msg_type = elt->msg_type;

      switch (msg_type)
        {
        case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
          vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
          /* note fallthrough... */
        case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
          r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                                node_runtime_index);
          vlib_frame_free (vm, r, f);
          break;
        case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
          vec_add2 (vm->node_main.pending_frames, p, 1);
          f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
          p->node_runtime_index = elt->node_runtime_index;
          p->frame_index = vlib_frame_index (vm, f);
          p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
          fq->dequeue_vectors += (u64) f->n_vectors;
          break;
        case VLIB_FRAME_QUEUE_ELT_API_MSG:
          vl_msg_api_handler_no_free (f);
          break;
        default:
          clib_warning ("bogus frame queue message, type %d", msg_type);
          break;
        }
      elt->valid = 0;
      fq->dequeues++;
      fq->dequeue_ticks += clib_cpu_time_now () - before;
      CLIB_MEMORY_BARRIER ();
      fq->head++;
      processed++;
    }
  ASSERT (0);
  return processed;
}

int
vlib_frame_queue_dequeue (int thread_id,
                          vlib_main_t * vm, vlib_node_main_t * nm)
{
  return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
}

int
vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
                          u32 frame_queue_index, vlib_frame_t * frame,
                          vlib_frame_queue_msg_type_t type)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
  vlib_frame_queue_elt_t *elt;
  u32 save_count;
  u64 new_tail;
  u64 before = clib_cpu_time_now ();

  ASSERT (fq);

  new_tail = clib_atomic_add_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head + fq->nelts)
    {
      f64 b4 = vlib_time_now_ticks (vm, before);
      vlib_worker_thread_barrier_check (vm, b4);
      /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
      // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
    }

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    {
    }

  /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
  save_count = frame->n_vectors;

  elt->frame = frame;
  elt->node_runtime_index = node_runtime_index;
  elt->msg_type = type;
  CLIB_MEMORY_BARRIER ();
  elt->valid = 1;

  return save_count;
}
#endif /* 0 */
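
/*
 * Note on the (disabled) reference implementation above: the ring uses the
 * per-element "valid" flag plus CLIB_MEMORY_BARRIER() as the producer /
 * consumer handshake.  The producer fills the element completely before
 * setting valid = 1, and the consumer clears valid only after it has taken
 * ownership of the frame.
 */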

/* To be called by vlib worker threads upon startup */
void
vlib_worker_thread_init (vlib_worker_thread_t * w)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  /*
   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working
   * {
   *    sigset_t s;
   *    sigfillset (&s);
   *    pthread_sigmask (SIG_SETMASK, &s, 0);
   * }
   */

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
    {
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
                        w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  if (!w->registration->use_pthreads)
    {

      /* Initial barrier sync, for both worker and i/o threads */
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

      while (*vlib_worker_threads->wait_at_barrier)
        ;

      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
    }
}
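
/*
 * The fetch-add / spin / fetch-sub sequence above is the initial barrier
 * handshake: each non-pthread worker checks in at the barrier, waits for
 * the main thread to drop wait_at_barrier, and then checks out again
 * before entering its main loop.
 */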

void *
vlib_worker_thread_bootstrap_fn (void *arg)
{
  void *rv;
  vlib_worker_thread_t *w = arg;

  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  rv = (void *) clib_calljmp
    ((uword (*)(uword)) w->thread_function,
     (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
  /* NOTREACHED, we hope */
  return rv;
}
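
/*
 * clib_calljmp () switches to the per-thread stack set up in
 * vlib_thread_init / start_workers before invoking the registered thread
 * function, which is why the bootstrap function is not expected to return
 * during normal operation.
 */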

void
vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
{
  const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
  const char *sys_node_path = "/sys/devices/system/node/node";
  clib_bitmap_t *nbmp = 0, *cbmp = 0;
  u32 node;
  u8 *p = 0;
  int core_id = -1, numa_id = -1;

  p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
  clib_sysfs_read ((char *) p, "%d", &core_id);
  vec_reset_length (p);

  /* *INDENT-OFF* */
  clib_sysfs_read ("/sys/devices/system/node/online", "%U",
                   unformat_bitmap_list, &nbmp);
  clib_bitmap_foreach (node, nbmp, ({
    p = format (p, "%s%u/cpulist%c", sys_node_path, node, 0);
    clib_sysfs_read ((char *) p, "%U", unformat_bitmap_list, &cbmp);
    if (clib_bitmap_get (cbmp, cpu_id))
      numa_id = node;
    vec_reset_length (cbmp);
    vec_reset_length (p);
  }));
  /* *INDENT-ON* */
  vec_free (nbmp);
  vec_free (cbmp);
  vec_free (p);

  w->core_id = core_id;
  w->numa_id = numa_id;
}
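
/*
 * Example of the sysfs lookups above, assuming cpu_id = 2 (illustrative
 * paths only): core_id is read from
 * /sys/devices/system/cpu/cpu2/topology/core_id, and the NUMA node is the
 * node N whose /sys/devices/system/node/nodeN/cpulist contains CPU 2.
 */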

static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  void *(*fp_arg) (void *) = fp;
  void *numa_heap;

  w->cpu_id = cpu_id;
  vlib_get_thread_core_numa (w, cpu_id);

  /* Set up NUMA-bound heap if indicated */
  if (clib_per_numa_mheaps[w->numa_id] == 0)
    {
      /* If the user requested a NUMA heap, create it... */
      if (tm->numa_heap_size)
        {
          numa_heap = clib_mem_init_thread_safe_numa
            (0 /* DIY */ , tm->numa_heap_size, w->numa_id);
          clib_per_numa_mheaps[w->numa_id] = numa_heap;
        }
      else
        {
          /* Or, use the main heap */
          clib_per_numa_mheaps[w->numa_id] = w->thread_mheap;
        }
    }

  if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
    return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
  else
    {
      pthread_t worker;
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (cpu_id, &cpuset);

      if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
        return clib_error_return_unix (0, "pthread_create");

      if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
        return clib_error_return_unix (0, "pthread_setaffinity_np");

      return 0;
    }
}
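
/*
 * Two launch paths exist above: if a platform callback
 * (tm->cb.vlib_launch_thread_cb) was registered -- typically by an
 * external component such as the DPDK plugin, although that registration
 * lives outside this file -- it owns thread creation; otherwise a plain
 * pthread is created and pinned to cpu_id with pthread_setaffinity_np().
 */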

static clib_error_t *
start_workers (vlib_main_t * vm)
{
  int i, j;
  vlib_worker_thread_t *w;
  vlib_main_t *vm_clone;
  void *oldheap;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_thread_registration_t *tr;
  vlib_node_runtime_t *rt;
  u32 n_vlib_mains = tm->n_vlib_mains;
  u32 worker_thread_index;
  u8 *main_heap = clib_mem_get_per_cpu_heap ();

  vec_reset_length (vlib_worker_threads);

  /* Set up the main thread */
  vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
  w->elog_track.name = "main thread";
  elog_track_register (&vm->elog_main, &w->elog_track);

  if (vec_len (tm->thread_prefix))
    {
      w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  vm->elog_main.lock =
    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  vm->elog_main.lock[0] = 0;

  if (n_vlib_mains > 1)
    {
      /* Replace hand-crafted length-1 vector with a real vector */
      vlib_mains = 0;

      vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
                            CLIB_CACHE_LINE_BYTES);
      _vec_len (vlib_mains) = 0;
      vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->wait_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->workers_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->node_reforks_required =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      /* We'll need the rpc vector lock... */
      clib_spinlock_init (&vm->pending_rpc_lock);

      /* Ask for an initial barrier sync */
      *vlib_worker_threads->workers_at_barrier = 0;
      *vlib_worker_threads->wait_at_barrier = 1;

      /* Without update or refork */
      *vlib_worker_threads->node_reforks_required = 0;
      vm->need_vlib_worker_thread_node_runtime_update = 0;

      /* init timing */
      vm->barrier_epoch = 0;
      vm->barrier_no_close_before = 0;

      worker_thread_index = 1;

      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          vlib_node_main_t *nm, *nm_clone;
          int k;

          tr = tm->registrations[i];

          if (tr->count == 0)
            continue;

          for (k = 0; k < tr->count; k++)
            {
              vlib_node_t *n;

              vec_add2 (vlib_worker_threads, w, 1);
              /* Currently unused, may not really work */
              if (tr->mheap_size)
                w->thread_mheap = create_mspace (tr->mheap_size,
                                                 0 /* unlocked */ );
              else
                w->thread_mheap = main_heap;

              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = k;
              w->registration = tr;

              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, k + 1);
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (&vm->elog_main, &w->elog_track);

              if (tr->no_data_structure_clone)
                continue;

              /* Fork vlib_global_main et al. Look for bugs here */
              oldheap = clib_mem_set_heap (w->thread_mheap);

              vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
                                                 CLIB_CACHE_LINE_BYTES);
              clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));

              vm_clone->thread_index = worker_thread_index;
              vm_clone->heap_base = w->thread_mheap;
              vm_clone->heap_aligned_base = (void *)
                (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
              vm_clone->init_functions_called =
                hash_create (0, /* value bytes */ 0);
              vm_clone->pending_rpc_requests = 0;
              vec_validate (vm_clone->pending_rpc_requests, 0);
              _vec_len (vm_clone->pending_rpc_requests) = 0;
              clib_memset (&vm_clone->random_buffer, 0,
                           sizeof (vm_clone->random_buffer));

              nm = &vlib_mains[0]->node_main;
              nm_clone = &vm_clone->node_main;
              /* fork next frames array, preserving node runtime indices */
              nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                                       CLIB_CACHE_LINE_BYTES);
              for (j = 0; j < vec_len (nm_clone->next_frames); j++)
                {
                  vlib_next_frame_t *nf = &nm_clone->next_frames[j];
                  u32 save_node_runtime_index;
                  u32 save_flags;

                  save_node_runtime_index = nf->node_runtime_index;
                  save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
                  vlib_next_frame_init (nf);
                  nf->node_runtime_index = save_node_runtime_index;
                  nf->flags = save_flags;
                }

              /* fork the frame dispatch queue */
              nm_clone->pending_frames = 0;
              vec_validate (nm_clone->pending_frames, 10);
              _vec_len (nm_clone->pending_frames) = 0;

              /* fork nodes */
              nm_clone->nodes = 0;

              /* Allocate all nodes in single block for speed */
              n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));

              for (j = 0; j < vec_len (nm->nodes); j++)
                {
                  clib_memcpy (n, nm->nodes[j], sizeof (*n));
                  /* none of the copied nodes have enqueue rights given out */
                  n->owner_node_index = VLIB_INVALID_NODE_INDEX;
                  clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
                  clib_memset (&n->stats_last_clear, 0,
                               sizeof (n->stats_last_clear));
                  vec_add1 (nm_clone->nodes, n);
                  n++;
                }
              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
              {
                vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                rt->thread_index = vm_clone->thread_index;
                /* copy initial runtime_data from node */
                if (n->runtime_data && n->runtime_data_bytes > 0)
                  clib_memcpy (rt->runtime_data, n->runtime_data,
                               clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                         n->runtime_data_bytes));
              }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
              {
                vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                rt->thread_index = vm_clone->thread_index;
                /* copy initial runtime_data from node */
                if (n->runtime_data && n->runtime_data_bytes > 0)
                  clib_memcpy (rt->runtime_data, n->runtime_data,
                               clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                         n->runtime_data_bytes));
              }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
              {
                vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                rt->thread_index = vm_clone->thread_index;
                /* copy initial runtime_data from node */
                if (n->runtime_data && n->runtime_data_bytes > 0)
                  clib_memcpy (rt->runtime_data, n->runtime_data,
                               clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                         n->runtime_data_bytes));
              }

              nm_clone->processes = vec_dup_aligned (nm->processes,
                                                     CLIB_CACHE_LINE_BYTES);

              /* Create per-thread frame freelist */
              nm_clone->frame_sizes = vec_new (vlib_frame_size_t, 1);
#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
              nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
#endif
              nm_clone->node_by_error = nm->node_by_error;

              /* Packet trace buffers are guaranteed to be empty, nothing to do here */

              clib_mem_set_heap (oldheap);
              vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);

              /* Switch to the stats segment ... */
              void *oldheap = vlib_stats_push_heap (0);
              vm_clone->error_main.counters = vec_dup_aligned
                (vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
              vlib_stats_pop_heap2 (vm_clone->error_main.counters,
                                    worker_thread_index, oldheap, 1);

              vm_clone->error_main.counters_last_clear = vec_dup_aligned
                (vlib_mains[0]->error_main.counters_last_clear,
                 CLIB_CACHE_LINE_BYTES);

              worker_thread_index++;
            }
        }
    }
  else
    {
      /* only have non-data-structure copy threads to create... */
      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          tr = tm->registrations[i];

          for (j = 0; j < tr->count; j++)
            {
              vec_add2 (vlib_worker_threads, w, 1);
              if (tr->mheap_size)
                {
                  w->thread_mheap =
                    create_mspace (tr->mheap_size, 0 /* locked */ );
                }
              else
                w->thread_mheap = main_heap;
              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = j;
              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, j + 1);
              w->registration = tr;
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (&vm->elog_main, &w->elog_track);
            }
        }
    }

  worker_thread_index = 1;

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      clib_error_t *err;
      int j;

      tr = tm->registrations[i];

      if (tr->use_pthreads || tm->use_pthreads)
        {
          for (j = 0; j < tr->count; j++)
            {
              w = vlib_worker_threads + worker_thread_index++;
              err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                            w, 0);
              if (err)
                clib_error_report (err);
            }
        }
      else
        {
          uword c;
          /* *INDENT-OFF* */
          clib_bitmap_foreach (c, tr->coremask, ({
            w = vlib_worker_threads + worker_thread_index++;
            err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                          w, c);
            if (err)
              clib_error_report (err);
          }));
          /* *INDENT-ON* */
        }
    }
  vlib_worker_thread_barrier_sync (vm);
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
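
/*
 * start_workers () is registered as a main-loop-enter function, so the
 * worker vlib_main_t clones and the launch of the worker threads happen
 * once, just before the main thread enters its dispatch loop.
 */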


static inline void
worker_thread_node_runtime_update_internal (void)
{
  int i, j;
  vlib_main_t *vm;
  vlib_node_main_t *nm, *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;
  never_inline void
    vlib_node_runtime_sync_stats (vlib_main_t * vm,
                                  vlib_node_runtime_t * r,
                                  uword n_calls,
                                  uword n_vectors, uword n_clocks);

  ASSERT (vlib_get_thread_index () == 0);

  vm = vlib_mains[0];
  nm = &vm->node_main;

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

  /*
   * Scrape all runtime stats, so we don't lose node runtime(s) with
   * pending counts, or throw away worker / io thread counts.
   */
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *n;
      n = nm->nodes[j];
      vlib_node_sync_stats (vm, n);
    }

  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      vlib_node_t *n;

      vm_clone = vlib_mains[i];
      nm_clone = &vm_clone->node_main;

      for (j = 0; j < vec_len (nm_clone->nodes); j++)
        {
          n = nm_clone->nodes[j];

          rt = vlib_node_get_runtime (vm_clone, n->index);
          vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
        }
    }

  /* Per-worker clone rebuilds are now done on each thread */
}
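
/*
 * The scrape above runs with the barrier held (note the wait_at_barrier
 * assertion), so node stats can be folded into the main-thread copies
 * without racing the workers; the per-worker clone rebuild itself happens
 * later, in vlib_worker_thread_node_refork ().
 */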


void
vlib_worker_thread_node_refork (void)
{
  vlib_main_t *vm, *vm_clone;
  vlib_node_main_t *nm, *nm_clone;
  vlib_node_t **old_nodes_clone;
  vlib_node_runtime_t *rt, *old_rt;

  vlib_node_t *new_n_clone;

  int j;

  vm = vlib_mains[0];
  nm = &vm->node_main;
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

  /* Re-clone error heap */
  u64 *old_counters = vm_clone->error_main.counters;
  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;

  clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
                    sizeof (vm->error_main));
  j = vec_len (vm->error_main.counters) - 1;

  /* Switch to the stats segment ... */
  void *oldheap = vlib_stats_push_heap (0);
  vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters = old_counters;
  vlib_stats_pop_heap2 (vm_clone->error_main.counters, vm_clone->thread_index,
                        oldheap, 0);

  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters_last_clear = old_counters_all_clear;

  nm_clone = &vm_clone->node_main;
  vec_free (nm_clone->next_frames);
  nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                           CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
    {
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      u32 save_node_runtime_index;
      u32 save_flags;

      save_node_runtime_index = nf->node_runtime_index;
      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
      vlib_next_frame_init (nf);
      nf->node_runtime_index = save_node_runtime_index;
      nf->flags = save_flags;
    }

  old_nodes_clone = nm_clone->nodes;
  nm_clone->nodes = 0;

  /* re-fork nodes */

  /* Allocate all nodes in single block for speed */
  new_n_clone =
    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *new_n = nm->nodes[j];

      clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
      /* none of the copied nodes have enqueue rights given out */
      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;

      if (j >= vec_len (old_nodes_clone))
        {
          /* new node, set to zero */
          clib_memset (&new_n_clone->stats_total, 0,
                       sizeof (new_n_clone->stats_total));
          clib_memset (&new_n_clone->stats_last_clear, 0,
                       sizeof (new_n_clone->stats_last_clear));
        }
      else
        {
          vlib_node_t *old_n_clone = old_nodes_clone[j];
          /* Copy stats if the old data is valid */
          clib_memcpy_fast (&new_n_clone->stats_total,
                            &old_n_clone->stats_total,
                            sizeof (new_n_clone->stats_total));
          clib_memcpy_fast (&new_n_clone->stats_last_clear,
                            &old_n_clone->stats_last_clear,
                            sizeof (new_n_clone->stats_last_clear));

          /* keep previous node state */
          new_n_clone->state = old_n_clone->state;
        }
      vec_add1 (nm_clone->nodes, new_n_clone);
      new_n_clone++;
    }
  /* Free the old node clones */
  clib_mem_free (old_nodes_clone[0]);

  vec_free (old_nodes_clone);


  /* re-clone internal nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
  {
    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
    rt->thread_index = vm_clone->thread_index;
    /* copy runtime_data, will be overwritten later for existing rt */
    if (n->runtime_data && n->runtime_data_bytes > 0)
      clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                        clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                  n->runtime_data_bytes));
  }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  /* re-clone input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
  {
    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
    rt->thread_index = vm_clone->thread_index;
    /* copy runtime_data, will be overwritten later for existing rt */
    if (n->runtime_data && n->runtime_data_bytes > 0)
      clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                        clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                  n->runtime_data_bytes));
  }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  /* re-clone pre-input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
  {
    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
    rt->thread_index = vm_clone->thread_index;
    /* copy runtime_data, will be overwritten later for existing rt */
    if (n->runtime_data && n->runtime_data_bytes > 0)
      clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                        clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                  n->runtime_data_bytes));
  }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  nm_clone->processes = vec_dup_aligned (nm->processes,
                                         CLIB_CACHE_LINE_BYTES);
  nm_clone->node_by_error = nm->node_by_error;
}
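
/*
 * Note on the counter handling above: the clone's existing counter vectors
 * are kept (and re-validated to the current length inside the stats
 * segment) rather than being replaced by the main thread's vectors, so
 * per-worker error counts survive the refork; only the metadata in
 * error_main is copied from thread 0.
 */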

void
vlib_worker_thread_node_runtime_update (void)
{
  /*
   * Make a note that we need to do a node runtime update
   * prior to releasing the barrier.
   */
  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
}
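
/*
 * Setting this flag is deliberately cheap: the expensive rebuild work is
 * deferred until the barrier is released, at which point the workers
 * rebuild their clones via vlib_worker_thread_node_refork () above.
 */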

u32
unformat_sched_policy (unformat_input_t * input, va_list * args)
{
  u32 *r = va_arg (*args, u32 *);

  if (0);
#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
  foreach_sched_policy
#undef _
    else
    return 0;
  return 1;
}
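
/*
 * unformat_sched_policy () matches the textual policy names generated by
 * the foreach_sched_policy macro (defined in the threads header) against
 * the "scheduler-policy" startup option parsed below, e.g.
 * "scheduler-policy fifo"; the exact set of accepted names comes from that
 * macro, not from this file.
 */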

static clib_error_t *
cpu_config (vlib_main_t * vm, unformat_input_t * input)
{
  vlib_thread_registration_t *tr;
  uword *p;
  vlib_thread_main_t *tm = &vlib_thread_main;
  u8 *name;
  uword *bitmap;
  u32 count;

  tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));

  tm->n_thread_stacks = 1;	/* account for main thread */
  tm->sched_policy = ~0;
  tm->sched_priority = ~0;
  tm->main_lcore = ~0;

  tr = tm->next;

  while (tr)
    {
      hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
      tr = tr->next;
    }

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "use-pthreads"))
        tm->use_pthreads = 1;
      else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
        ;
      else if (unformat (input, "main-core %u", &tm->main_lcore))
        ;
      else if (unformat (input, "skip-cores %u", &tm->skip_cores))
        ;
      else if (unformat (input, "numa-heap-size %U",
                         unformat_memory_size, &tm->numa_heap_size))
        ;
      else if (unformat (input, "coremask-%s %U", &name,
                         unformat_bitmap_mask, &bitmap) ||
               unformat (input, "corelist-%s %U", &name,
                         unformat_bitmap_list, &bitmap))
        {
          p = hash_get_mem (tm->thread_registrations_by_name, name);
          if (p == 0)
            return clib_error_return (0, "no such thread type '%s'", name);

          tr = (vlib_thread_registration_t *) p[0];

          if (tr->use_pthreads)
            return clib_error_return (0,
                                      "corelist cannot be set for '%s' threads",
                                      name);
          if (tr->count)
            return clib_error_return
              (0, "core placement of '%s' threads is already configured",
               name);

          tr->coremask = bitmap;
          tr->count = clib_bitmap_count_set_bits (tr->coremask);
        }
      else
        if (unformat
            (input, "scheduler-policy %U", unformat_sched_policy,
             &tm->sched_policy))
        ;
      else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
        ;
      else if (unformat (input, "%s %u", &name, &count))
        {
          p = hash_get_mem (tm->thread_registrations_by_name, name);
1317 if (p == 0)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001318	    return clib_error_return (0, "no such thread type '%s'", name);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001319
1320 tr = (vlib_thread_registration_t *) p[0];
Vladimir Isaev18a4a372020-03-17 12:30:11 +03001321
Dave Barach9b8ffd92016-07-08 08:13:45 -04001322 if (tr->fixed_count)
1323 return clib_error_return
Vladimir Isaev18a4a372020-03-17 12:30:11 +03001324 (0, "number of '%s' threads not configurable", name);
1325 if (tr->count)
1326 return clib_error_return
1327 (0, "number of '%s' threads is already configured", name);
1328
Dave Barach9b8ffd92016-07-08 08:13:45 -04001329 tr->count = count;
1330 }
1331 else
1332 break;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001333 }
1334
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001335 if (tm->sched_priority != ~0)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001336 {
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001337 if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
Pavel Kotucek1e765832016-09-23 08:54:14 +02001338 {
1339 u32 prio_max = sched_get_priority_max (tm->sched_policy);
1340 u32 prio_min = sched_get_priority_min (tm->sched_policy);
1341 if (tm->sched_priority > prio_max)
1342 tm->sched_priority = prio_max;
1343 if (tm->sched_priority < prio_min)
1344 tm->sched_priority = prio_min;
1345 }
1346 else
Pavel Kotucekc08a1ed2016-09-23 08:54:14 +02001347 {
1348 return clib_error_return
1349 (0,
1350 "scheduling priority (%d) is not allowed for `normal` scheduling policy",
1351 tm->sched_priority);
1352 }
Pavel Kotucek1e765832016-09-23 08:54:14 +02001353 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001354 tr = tm->next;
1355
1356 if (!tm->thread_prefix)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001357 tm->thread_prefix = format (0, "vpp");
Ed Warnickecb9cada2015-12-08 15:45:58 -07001358
1359 while (tr)
1360 {
1361 tm->n_thread_stacks += tr->count;
1362 tm->n_pthreads += tr->count * tr->use_pthreads;
Damjan Marion878c6092017-01-04 13:19:27 +01001363 tm->n_threads += tr->count * (tr->use_pthreads == 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001364 tr = tr->next;
1365 }
1366
1367 return 0;
1368}
1369
1370VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
1371
Ed Warnickecb9cada2015-12-08 15:45:58 -07001372void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
Dave Barach9b8ffd92016-07-08 08:13:45 -04001373void
1374vnet_main_fixup (vlib_fork_fixup_t which)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001375{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001376}
1377
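/*
 * Main-thread helper: sync all workers at the barrier, apply the requested
 * fixup via the (weak) vnet_main_fixup() hook, then release the barrier.
 */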
1378void
1379vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
1380{
1381 vlib_main_t *vm = vlib_get_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001382
1383 if (vlib_mains == 0)
1384 return;
1385
Damjan Marion586afd72017-04-05 19:18:20 +02001386 ASSERT (vlib_get_thread_index () == 0);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001387 vlib_worker_thread_barrier_sync (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001388
1389 switch (which)
1390 {
1391 case VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX:
1392 vnet_main_fixup (VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX);
1393 break;
1394
1395 default:
Dave Barach9b8ffd92016-07-08 08:13:45 -04001396 ASSERT (0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001397 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001398 vlib_worker_thread_barrier_release (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001399}
1400
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001401 /*
1402 * Enforce minimum open time to minimize packet loss due to Rx overflow,
1403 * based on a test-based heuristic that the barrier should be open for at least
1404 * 3 times as long as it is closed (with an upper bound of 1ms because by that
1405 * point it is probably too late to make a difference)
1406 */
1407
1408#ifndef BARRIER_MINIMUM_OPEN_LIMIT
1409#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
1410#endif
1411
1412#ifndef BARRIER_MINIMUM_OPEN_FACTOR
1413#define BARRIER_MINIMUM_OPEN_FACTOR 3
1414#endif
1415
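/*
 * Used during startup: close the barrier until every worker thread has
 * checked in, then reopen it immediately so all threads proceed from a
 * common point. Panics if workers fail to arrive within
 * BARRIER_SYNC_TIMEOUT.
 */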
Dave Barach9b8ffd92016-07-08 08:13:45 -04001416void
Dave Barachc602b382019-06-03 19:48:22 -04001417vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
1418{
1419 f64 deadline;
1420 f64 now = vlib_time_now (vm);
1421 u32 count = vec_len (vlib_mains) - 1;
1422
1423 /* No worker threads? */
1424 if (count == 0)
1425 return;
1426
1427 deadline = now + BARRIER_SYNC_TIMEOUT;
1428 *vlib_worker_threads->wait_at_barrier = 1;
1429 while (*vlib_worker_threads->workers_at_barrier != count)
1430 {
1431 if ((now = vlib_time_now (vm)) > deadline)
1432 {
1433 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1434 os_panic ();
1435 }
1436 CLIB_PAUSE ();
1437 }
1438 *vlib_worker_threads->wait_at_barrier = 0;
1439}
1440
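/*
 * Close the barrier from the main thread. If any worker appears busy
 * (vector rate above 10), wait out the hold-down timer
 * (barrier_no_close_before) before closing, then raise wait_at_barrier and
 * spin until every worker has parked, panicking if BARRIER_SYNC_TIMEOUT
 * expires.
 */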
1441void
Damjan Marion8343ee52019-02-26 17:15:48 +01001442vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001443{
1444 f64 deadline;
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001445 f64 now;
1446 f64 t_entry;
1447 f64 t_open;
1448 f64 t_closed;
Dave Barach9ae190e2019-04-23 10:07:24 -04001449 f64 max_vector_rate;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001450 u32 count;
Dave Barach9ae190e2019-04-23 10:07:24 -04001451 int i;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001452
Dave Barach80f54e22017-03-08 19:08:56 -05001453 if (vec_len (vlib_mains) < 2)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001454 return;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001455
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001456 ASSERT (vlib_get_thread_index () == 0);
1457
Damjan Marion8343ee52019-02-26 17:15:48 +01001458 vlib_worker_threads[0].barrier_caller = func_name;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001459 count = vec_len (vlib_mains) - 1;
1460
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001461 /* Record entry relative to last close */
1462 now = vlib_time_now (vm);
1463 t_entry = now - vm->barrier_epoch;
1464
Ed Warnickecb9cada2015-12-08 15:45:58 -07001465 /* Tolerate recursive calls */
1466 if (++vlib_worker_threads[0].recursion_level > 1)
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001467 {
1468 barrier_trace_sync_rec (t_entry);
1469 return;
1470 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001471
Dave Barach9ae190e2019-04-23 10:07:24 -04001472 /*
1473 * Need data to decide if we're working hard enough to honor
1474 * the barrier hold-down timer.
1475 */
1476 max_vector_rate = 0.0;
1477 for (i = 1; i < vec_len (vlib_mains); i++)
1478 max_vector_rate =
1479 clib_max (max_vector_rate,
Dave Baracha8df85c2019-10-01 13:34:23 -04001480 (f64) vlib_last_vectors_per_main_loop (vlib_mains[i]));
Dave Barach9ae190e2019-04-23 10:07:24 -04001481
Bud Grise42f20062016-03-16 13:09:46 -04001482 vlib_worker_threads[0].barrier_sync_count++;
1483
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001484 /* Enforce minimum barrier open time to minimize packet loss */
1485 ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001486
Dave Barach9ae190e2019-04-23 10:07:24 -04001487 /*
1488 * If any worker thread seems busy, which we define
1489 * as a vector rate above 10, we enforce the barrier hold-down timer
1490 */
1491 if (max_vector_rate > 10.0)
Dave Barach36feebb2018-09-07 11:12:27 -04001492 {
Dave Barach9ae190e2019-04-23 10:07:24 -04001493 while (1)
Dave Barach36feebb2018-09-07 11:12:27 -04001494 {
Dave Barach9ae190e2019-04-23 10:07:24 -04001495 now = vlib_time_now (vm);
1496 /* Barrier hold-down timer expired? */
1497 if (now >= vm->barrier_no_close_before)
1498 break;
1499 if ((vm->barrier_no_close_before - now)
1500 > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
1501 {
1502 clib_warning
1503 ("clock change: would have waited for %.4f seconds",
1504 (vm->barrier_no_close_before - now));
1505 break;
1506 }
Dave Barach36feebb2018-09-07 11:12:27 -04001507 }
1508 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001509 /* Record time of closure */
1510 t_open = now - vm->barrier_epoch;
1511 vm->barrier_epoch = now;
1512
1513 deadline = now + BARRIER_SYNC_TIMEOUT;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001514
1515 *vlib_worker_threads->wait_at_barrier = 1;
1516 while (*vlib_worker_threads->workers_at_barrier != count)
1517 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001518 if ((now = vlib_time_now (vm)) > deadline)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001519 {
1520 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1521 os_panic ();
1522 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001523 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001524
1525 t_closed = now - vm->barrier_epoch;
1526
1527 barrier_trace_sync (t_entry, t_open, t_closed);
1528
Ed Warnickecb9cada2015-12-08 15:45:58 -07001529}
1530
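/*
 * Reopen the barrier. If a node runtime update was requested, rebuild the
 * main-thread state here and ask each worker to refork its clones; then
 * drop wait_at_barrier, wait for the workers (and any reforks) to finish,
 * and compute the minimum time the barrier must now stay open.
 */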
Dave Barach9b8ffd92016-07-08 08:13:45 -04001531void
1532vlib_worker_thread_barrier_release (vlib_main_t * vm)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001533{
1534 f64 deadline;
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001535 f64 now;
1536 f64 minimum_open;
1537 f64 t_entry;
1538 f64 t_closed_total;
1539 f64 t_update_main = 0.0;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001540 int refork_needed = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001541
Dave Barach80f54e22017-03-08 19:08:56 -05001542 if (vec_len (vlib_mains) < 2)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001543 return;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001544
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001545 ASSERT (vlib_get_thread_index () == 0);
1546
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001547
1548 now = vlib_time_now (vm);
1549 t_entry = now - vm->barrier_epoch;
1550
Ed Warnickecb9cada2015-12-08 15:45:58 -07001551 if (--vlib_worker_threads[0].recursion_level > 0)
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001552 {
1553 barrier_trace_release_rec (t_entry);
1554 return;
1555 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001556
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001557 /* Update (all) node runtimes before releasing the barrier, if needed */
1558 if (vm->need_vlib_worker_thread_node_runtime_update)
1559 {
Dave Barach1ddbc012018-06-13 09:26:05 -04001560 /*
1561       * Lock the stat segment here, so we're safe when
1562 * rebuilding the stat segment node clones from the
1563 * stat thread...
1564 */
1565 vlib_stat_segment_lock ();
1566
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001567 /* Do stats elements on main thread */
1568 worker_thread_node_runtime_update_internal ();
1569 vm->need_vlib_worker_thread_node_runtime_update = 0;
1570
1571 /* Do per thread rebuilds in parallel */
1572 refork_needed = 1;
Sirshak Das2f6d7bb2018-10-03 22:53:51 +00001573 clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
1574 (vec_len (vlib_mains) - 1));
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001575 now = vlib_time_now (vm);
1576 t_update_main = now - vm->barrier_epoch;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001577 }
1578
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001579 deadline = now + BARRIER_SYNC_TIMEOUT;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001580
Dave Baracha4324a92019-02-19 17:05:30 -05001581 /*
1582 * Note when we let go of the barrier.
1583 * Workers can use this to derive a reasonably accurate
1584 * time offset. See vlib_time_now(...)
1585 */
1586 vm->time_last_barrier_release = vlib_time_now (vm);
1587 CLIB_MEMORY_STORE_BARRIER ();
1588
Ed Warnickecb9cada2015-12-08 15:45:58 -07001589 *vlib_worker_threads->wait_at_barrier = 0;
1590
1591 while (*vlib_worker_threads->workers_at_barrier > 0)
1592 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001593 if ((now = vlib_time_now (vm)) > deadline)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001594 {
1595 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1596 os_panic ();
1597 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001598 }
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001599
1600 /* Wait for reforks before continuing */
1601 if (refork_needed)
1602 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001603 now = vlib_time_now (vm);
1604
1605 deadline = now + BARRIER_SYNC_TIMEOUT;
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001606
1607 while (*vlib_worker_threads->node_reforks_required > 0)
1608 {
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001609 if ((now = vlib_time_now (vm)) > deadline)
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001610 {
1611 fformat (stderr, "%s: worker thread refork deadlock\n",
1612 __FUNCTION__);
1613 os_panic ();
1614 }
1615 }
Dave Barach1ddbc012018-06-13 09:26:05 -04001616 vlib_stat_segment_unlock ();
Colin Tregenza Dancer21596182017-09-04 15:27:49 +01001617 }
Colin Tregenza Dancereb1ac172017-09-06 20:23:24 +01001618
1619 t_closed_total = now - vm->barrier_epoch;
1620
1621 minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
1622
1623 if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
1624 {
1625 minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
1626 }
1627
1628 vm->barrier_no_close_before = now + minimum_open;
1629
1630 /* Record barrier epoch (used to enforce minimum open time) */
1631 vm->barrier_epoch = now;
1632
1633 barrier_trace_release (t_entry, t_closed_total, t_update_main);
1634
Ed Warnickecb9cada2015-12-08 15:45:58 -07001635}
1636
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001637/*
1638 * Check the frame queue to see if any frames are available.
Dave Barach9b8ffd92016-07-08 08:13:45 -04001639 * If so, pull the packets off the frames and hand them to
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001640 * the handoff node.
1641 */
Damjan Marione9d52d52017-03-09 15:42:26 +01001642int
1643vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001644{
Damjan Marion586afd72017-04-05 19:18:20 +02001645 u32 thread_id = vm->thread_index;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001646 vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001647 vlib_frame_queue_elt_t *elt;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001648 u32 *from, *to;
1649 vlib_frame_t *f;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001650 int msg_type;
1651 int processed = 0;
1652 u32 n_left_to_node;
1653 u32 vectors = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001654
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001655 ASSERT (fq);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001656 ASSERT (vm == vlib_mains[thread_id]);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001657
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001658 if (PREDICT_FALSE (fqm->node_index == ~0))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001659 return 0;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001660 /*
1661 * Gather trace data for frame queues
1662 */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001663 if (PREDICT_FALSE (fq->trace))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001664 {
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001665 frame_queue_trace_t *fqt;
1666 frame_queue_nelt_counter_t *fqh;
1667 u32 elix;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001668
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001669 fqt = &fqm->frame_queue_traces[thread_id];
Ed Warnickecb9cada2015-12-08 15:45:58 -07001670
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001671 fqt->nelts = fq->nelts;
1672 fqt->head = fq->head;
1673 fqt->head_hint = fq->head_hint;
1674 fqt->tail = fq->tail;
1675 fqt->threshold = fq->vector_threshold;
1676 fqt->n_in_use = fqt->tail - fqt->head;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001677 if (fqt->n_in_use >= fqt->nelts)
1678 {
1679	  /* if beyond max then use max */
1680 fqt->n_in_use = fqt->nelts - 1;
1681 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001682
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001683 /* Record the number of elements in use in the histogram */
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001684 fqh = &fqm->frame_queue_histogram[thread_id];
Dave Barach9b8ffd92016-07-08 08:13:45 -04001685 fqh->count[fqt->n_in_use]++;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001686
1687 /* Record a snapshot of the elements in use */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001688 for (elix = 0; elix < fqt->nelts; elix++)
1689 {
1690 elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
1691 if (1 || elt->valid)
1692 {
1693 fqt->n_vectors[elix] = elt->n_vectors;
1694 }
1695 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001696 fqt->written = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001697 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001698
1699 while (1)
1700 {
Dave Baracha638c182019-06-21 18:24:07 -04001701 vlib_buffer_t *b;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001702 if (fq->head == fq->tail)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001703 {
1704 fq->head_hint = fq->head;
1705 return processed;
1706 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001707
Dave Barach9b8ffd92016-07-08 08:13:45 -04001708 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001709
1710 if (!elt->valid)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001711 {
1712 fq->head_hint = fq->head;
1713 return processed;
1714 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001715
1716 from = elt->buffer_index;
1717 msg_type = elt->msg_type;
1718
1719 ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
1720 ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);
1721
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001722 f = vlib_get_frame_to_node (vm, fqm->node_index);
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001723
Dave Baracha638c182019-06-21 18:24:07 -04001724 /* If the first vector is traced, set the frame trace flag */
1725 b = vlib_get_buffer (vm, from[0]);
1726 if (b->flags & VLIB_BUFFER_IS_TRACED)
1727 f->frame_flags |= VLIB_NODE_FLAG_TRACE;
1728
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001729 to = vlib_frame_vector_args (f);
1730
1731 n_left_to_node = elt->n_vectors;
1732
1733 while (n_left_to_node >= 4)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001734 {
1735 to[0] = from[0];
1736 to[1] = from[1];
1737 to[2] = from[2];
1738 to[3] = from[3];
1739 to += 4;
1740 from += 4;
1741 n_left_to_node -= 4;
1742 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001743
1744 while (n_left_to_node > 0)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001745 {
1746 to[0] = from[0];
1747 to++;
1748 from++;
1749 n_left_to_node--;
1750 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001751
1752 vectors += elt->n_vectors;
1753 f->n_vectors = elt->n_vectors;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001754 vlib_put_frame_to_node (vm, fqm->node_index, f);
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001755
1756 elt->valid = 0;
1757 elt->n_vectors = 0;
1758 elt->msg_type = 0xfefefefe;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001759 CLIB_MEMORY_BARRIER ();
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001760 fq->head++;
1761 processed++;
1762
1763 /*
1764 * Limit the number of packets pushed into the graph
1765 */
1766 if (vectors >= fq->vector_threshold)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001767 {
1768 fq->head_hint = fq->head;
1769 return processed;
1770 }
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001771 }
Dave Barach9b8ffd92016-07-08 08:13:45 -04001772 ASSERT (0);
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001773 return processed;
1774}
1775
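/*
 * Worker thread entry point: initialize per-thread state, run the worker
 * init functions, wait for an external thread manager (e.g. dpdk) to
 * release the workers if one is registered, then enter vlib_worker_loop(),
 * which does not return.
 */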
Dave Barach9b8ffd92016-07-08 08:13:45 -04001776void
1777vlib_worker_thread_fn (void *arg)
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001778{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001779 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
Damjan Marion878c6092017-01-04 13:19:27 +01001780 vlib_thread_main_t *tm = vlib_get_thread_main ();
Dave Barach9b8ffd92016-07-08 08:13:45 -04001781 vlib_main_t *vm = vlib_get_main ();
Damjan Marione9f929b2017-03-16 11:32:09 +01001782 clib_error_t *e;
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001783
Damjan Marion586afd72017-04-05 19:18:20 +02001784 ASSERT (vm->thread_index == vlib_get_thread_index ());
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001785
1786 vlib_worker_thread_init (w);
1787 clib_time_init (&vm->clib_time);
1788 clib_mem_set_heap (w->thread_mheap);
1789
Dave Barachc602b382019-06-03 19:48:22 -04001790 e = vlib_call_init_exit_functions_no_sort
Dave Barachf8d50682019-05-14 18:01:44 -04001791 (vm, &vm->worker_init_function_registrations, 1 /* call_once */ );
Damjan Marione9f929b2017-03-16 11:32:09 +01001792 if (e)
1793 clib_error_report (e);
1794
Dave Barachc602b382019-06-03 19:48:22 -04001795 /* Wait until the dpdk init sequence is complete */
1796 while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
1797 vlib_worker_thread_barrier_check ();
1798
Damjan Marione9d52d52017-03-09 15:42:26 +01001799 vlib_worker_loop (vm);
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001800}
1801
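/* Built-in "workers" ("wk") thread type; each such thread runs vlib_worker_thread_fn */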
Dave Barach9b8ffd92016-07-08 08:13:45 -04001802/* *INDENT-OFF* */
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001803VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
1804 .name = "workers",
1805 .short_name = "wk",
1806 .function = vlib_worker_thread_fn,
1807};
Dave Barach9b8ffd92016-07-08 08:13:45 -04001808/* *INDENT-ON* */
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001809
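/*
 * Create a frame queue main for handing work off to the given node: one
 * frame queue per vlib_main plus per-thread congestion bookkeeping.
 * Returns the index of the new fqm in tm->frame_queue_mains.
 */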
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001810u32
1811vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
1812{
1813 vlib_thread_main_t *tm = vlib_get_thread_main ();
1814 vlib_frame_queue_main_t *fqm;
1815 vlib_frame_queue_t *fq;
1816 int i;
Elias Rudberg368104d2020-04-16 16:01:52 +02001817 u32 num_threads;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001818
1819 if (frame_queue_nelts == 0)
dongjuan88752482019-06-04 10:59:02 +08001820 frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001821
Elias Rudberg368104d2020-04-16 16:01:52 +02001822 num_threads = 1 /* main thread */ + tm->n_threads;
1823 ASSERT (frame_queue_nelts >= 8 + num_threads);
Damjan Marion78fd7e82018-07-20 18:47:05 +02001824
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001825 vec_add2 (tm->frame_queue_mains, fqm, 1);
1826
1827 fqm->node_index = node_index;
Damjan Marion78fd7e82018-07-20 18:47:05 +02001828 fqm->frame_queue_nelts = frame_queue_nelts;
Elias Rudberg368104d2020-04-16 16:01:52 +02001829 fqm->queue_hi_thresh = frame_queue_nelts - num_threads;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001830
1831 vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
Damjan Marion78fd7e82018-07-20 18:47:05 +02001832 vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001833 _vec_len (fqm->vlib_frame_queues) = 0;
1834 for (i = 0; i < tm->n_vlib_mains; i++)
1835 {
Damjan Marion78fd7e82018-07-20 18:47:05 +02001836 vlib_frame_queue_per_thread_data_t *ptd;
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001837 fq = vlib_frame_queue_alloc (frame_queue_nelts);
1838 vec_add1 (fqm->vlib_frame_queues, fq);
Damjan Marion78fd7e82018-07-20 18:47:05 +02001839
1840 ptd = vec_elt_at_index (fqm->per_thread_data, i);
1841 vec_validate (ptd->handoff_queue_elt_by_thread_index,
1842 tm->n_vlib_mains - 1);
1843 vec_validate_init_empty (ptd->congested_handoff_queue_by_thread_index,
1844 tm->n_vlib_mains - 1,
1845 (vlib_frame_queue_t *) (~0));
Damjan Marionaaef1eb2016-11-08 17:37:01 +01001846 }
1847
1848 return (fqm - tm->frame_queue_mains);
1849}
1850
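/*
 * Register external thread-management callbacks (e.g. from a plugin that
 * launches threads itself). Only the first registration is accepted;
 * subsequent calls return -1.
 */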
Damjan Marion878c6092017-01-04 13:19:27 +01001851int
1852vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
1853{
1854 vlib_thread_main_t *tm = vlib_get_thread_main ();
1855
1856 if (tm->extern_thread_mgmt)
1857 return -1;
1858
1859 tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
1860 tm->extern_thread_mgmt = 1;
1861 return 0;
1862}
1863
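/*
 * Helper executed on the main thread: deliver a process event that was
 * requested on behalf of another thread.
 */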
Dave Barach69128d02017-09-26 10:54:34 -04001864void
1865vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
1866 args)
1867{
1868 ASSERT (vlib_get_thread_index () == 0);
1869 vlib_process_signal_event (vlib_get_main (), args->node_index,
1870 args->type_opaque, args->data);
1871}
1872
1873void *rpc_call_main_thread_cb_fn;
1874
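/*
 * Hand a callback and its argument blob to the main thread via the
 * registered rpc_call_main_thread_cb_fn; warns if no handler has been set.
 */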
1875void
1876vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
1877{
1878 if (rpc_call_main_thread_cb_fn)
1879 {
1880 void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
1881 (*fp) (callback, args, arg_size);
1882 }
1883 else
1884 clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
1885}
1886
Dave Barach9b8ffd92016-07-08 08:13:45 -04001887clib_error_t *
1888threads_init (vlib_main_t * vm)
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001889{
Ed Warnickecb9cada2015-12-08 15:45:58 -07001890 return 0;
1891}
1892
Damjan Marion0f8ecf02016-06-27 08:30:30 +02001893VLIB_INIT_FUNCTION (threads_init);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001894
Dave Baracha4324a92019-02-19 17:05:30 -05001895
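/*
 * CLI handler for "show clock": print the main-thread clock and current
 * time, then each worker's clock, its time offset, and the error relative
 * to the last barrier release.
 */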
1896static clib_error_t *
1897show_clock_command_fn (vlib_main_t * vm,
1898 unformat_input_t * input, vlib_cli_command_t * cmd)
1899{
1900 int i;
Dave Barachc25048b2020-01-29 18:05:24 -05001901 int verbose = 0;
Dave Barach19718002020-03-11 10:31:36 -04001902 clib_timebase_t _tb, *tb = &_tb;
Dave Baracha4324a92019-02-19 17:05:30 -05001903
Dave Barachc25048b2020-01-29 18:05:24 -05001904 (void) unformat (input, "verbose %=", &verbose, 1);
Dave Baracha4324a92019-02-19 17:05:30 -05001905
Dave Barach19718002020-03-11 10:31:36 -04001906 clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE,
1907 &vm->clib_time);
1908
1909 vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time,
1910 verbose, format_clib_timebase_time,
1911 clib_timebase_now (tb));
Dave Baracha4324a92019-02-19 17:05:30 -05001912
1913 if (vec_len (vlib_mains) == 1)
1914 return 0;
1915
1916 vlib_cli_output (vm, "Time last barrier release %.9f",
1917 vm->time_last_barrier_release);
1918
1919 for (i = 1; i < vec_len (vlib_mains); i++)
1920 {
1921 if (vlib_mains[i] == 0)
1922 continue;
Dave Barachc25048b2020-01-29 18:05:24 -05001923
1924 vlib_cli_output (vm, "%d: %U", i, format_clib_time,
1925 &vlib_mains[i]->clib_time, verbose);
1926
Dave Baracha4324a92019-02-19 17:05:30 -05001927 vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
1928 vlib_mains[i]->time_offset,
1929 vm->time_last_barrier_release -
1930 vlib_mains[i]->time_last_barrier_release);
1931 }
1932 return 0;
1933}
1934
1935/* *INDENT-OFF* */
1936VLIB_CLI_COMMAND (f_command, static) =
1937{
1938 .path = "show clock",
1939 .short_help = "show clock",
1940 .function = show_clock_command_fn,
1941};
1942/* *INDENT-ON* */
1943
Dave Barach9b8ffd92016-07-08 08:13:45 -04001944/*
1945 * fd.io coding-style-patch-verification: ON
1946 *
1947 * Local Variables:
1948 * eval: (c-set-style "gnu")
1949 * End:
1950 */