/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vlib_threads_h
#define included_vlib_threads_h

#include <vlib/main.h>
#include <vppinfra/callback.h>
#include <linux/sched.h>

void vlib_set_thread_name (char *name);

/* arg is actually a vlib_worker_thread_t * */
typedef void (vlib_thread_function_t) (void *arg);

typedef struct vlib_thread_registration_
{
  /* constructor generated list of thread registrations */
  struct vlib_thread_registration_ *next;

  /* config parameters */
  char *name;
  char *short_name;
  vlib_thread_function_t *function;
  uword mheap_size;
  int fixed_count;
  u32 count;
  int no_data_structure_clone;
  u32 frame_queue_nelts;

  /* All threads of this type run on pthreads */
  int use_pthreads;
  u32 first_index;
  uword *coremask;
} vlib_thread_registration_t;

/*
 * Frames have their cpu / vlib_main_t index in the low-order N bits
 * Make VLIB_MAX_CPUS a power-of-two, please...
 */

#ifndef VLIB_MAX_CPUS
#define VLIB_MAX_CPUS 256
#endif

#if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
#error Please increase number of per-cpu mheaps
#endif

#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0xff with the default VLIB_MAX_CPUS of 256 */
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)

#define VLIB_LOG2_THREAD_STACK_SIZE (21)
#define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
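/* i.e. each vlib thread stack is 1 << 21 bytes = 2 MB */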

typedef enum
{
  VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME,
} vlib_frame_queue_msg_type_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 valid;
  u32 msg_type;
  u32 n_vectors;
  u32 last_n_vectors;

  /* 256 * 4 = 1024 bytes, even mult of cache line size */
  u32 buffer_index[VLIB_FRAME_SIZE];
}
vlib_frame_queue_elt_t;

typedef struct
{
  /* First cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *wait_at_barrier;
  volatile u32 *workers_at_barrier;

  /* Second cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  void *thread_mheap;
  u8 *thread_stack;
  void (*thread_function) (void *);
  void *thread_function_arg;
  i64 recursion_level;
  elog_track_t elog_track;
  u32 instance_id;
  vlib_thread_registration_t *registration;
  u8 *name;
  u64 barrier_sync_count;
  u8 barrier_elog_enabled;
  const char *barrier_caller;
  const char *barrier_context;
  volatile u32 *node_reforks_required;

  long lwp;
  int cpu_id;
  int core_id;
  int numa_id;
  pthread_t thread_id;
} vlib_worker_thread_t;

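/* Per-thread descriptors, indexed by thread index; element 0 describes
 * the main thread, elements 1..N the workers. */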
extern vlib_worker_thread_t *vlib_worker_threads;

typedef struct
{
  /* enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u64 tail;
  u32 enqueue_full_events;

  /* dequeue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  volatile u64 head;
  u64 trace;
  u64 vector_threshold;

  /* dequeue hint to enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  volatile u64 head_hint;

  /* read-only, constant, shared */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline3);
  vlib_frame_queue_elt_t *elts;
  u32 nelts;
}
vlib_frame_queue_t;

typedef struct
{
  vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index;
  vlib_frame_queue_t **congested_handoff_queue_by_thread_index;
} vlib_frame_queue_per_thread_data_t;

typedef struct
{
  u32 node_index;
  u32 frame_queue_nelts;
  u32 queue_hi_thresh;

  vlib_frame_queue_t **vlib_frame_queues;
  vlib_frame_queue_per_thread_data_t *per_thread_data;

  /* for frame queue tracing */
  frame_queue_trace_t *frame_queue_traces;
  frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;

typedef struct
{
  uword node_index;
  uword type_opaque;
  uword data;
} vlib_process_signal_event_mt_args_t;

/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);

int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
			      u32 frame_queue_index, vlib_frame_t * frame,
			      vlib_frame_queue_msg_type_t type);

void vlib_worker_thread_node_runtime_update (void);

void vlib_create_worker_threads (vlib_main_t * vm, int n,
				 void (*thread_function) (void *));

void vlib_worker_thread_init (vlib_worker_thread_t * w);
u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);

/* Check for a barrier sync request every 30ms */
#define BARRIER_SYNC_DELAY (0.030000)

#if CLIB_DEBUG > 0
/* long barrier timeout, for gdb... */
#define BARRIER_SYNC_TIMEOUT (600.1)
#else
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif

#define vlib_worker_thread_barrier_sync(X) {vlib_worker_thread_barrier_sync_int(X, __FUNCTION__);}

void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm,
					  const char *func_name);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
u8 vlib_worker_thread_barrier_held (void);
void vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm);
void vlib_worker_thread_node_refork (void);
/**
 * Wait until each of the workers has been once around the track
 */
void vlib_worker_wait_one_loop (void);

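/*
 * Typical main-thread usage when mutating shared state, a minimal sketch
 * (update_shared_state() is a hypothetical placeholder for the caller's
 * own critical section):
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   update_shared_state ();
 *   vlib_worker_thread_barrier_release (vm);
 *
 * Workers park in vlib_worker_thread_barrier_check() (below) until the
 * main thread releases the barrier.
 */
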
static_always_inline uword
vlib_get_thread_index (void)
{
  return __os_thread_index;
}

always_inline void
vlib_smp_unsafe_warning (void)
{
  if (CLIB_DEBUG > 0)
    {
      if (vlib_get_thread_index ())
	fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
    }
}

always_inline int
__foreach_vlib_main_helper (vlib_main_t *ii, vlib_main_t **p)
{
  vlib_main_t *vm;
  u32 index = ii - (vlib_main_t *) 0;

  if (index >= vec_len (vlib_global_main.vlib_mains))
    return 0;

  *p = vm = vlib_global_main.vlib_mains[index];
  ASSERT (index == 0 || vm->parked_at_barrier == 1);
  return 1;
}

#define foreach_vlib_main()                                                   \
  for (vlib_main_t *ii = 0, *this_vlib_main;                                  \
       __foreach_vlib_main_helper (ii, &this_vlib_main); ii++)                \
    if (this_vlib_main)

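/*
 * Iterates over the main thread's vlib_main_t and every worker's.  A
 * minimal usage sketch (do_per_thread_work() is a hypothetical
 * placeholder; per the ASSERT in the helper above, workers are expected
 * to be parked at the barrier when this runs on the main thread):
 *
 *   foreach_vlib_main ()
 *     {
 *       do_per_thread_work (this_vlib_main);
 *     }
 */
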
#define foreach_sched_policy \
  _(SCHED_OTHER, OTHER, "other") \
  _(SCHED_BATCH, BATCH, "batch") \
  _(SCHED_IDLE, IDLE, "idle") \
  _(SCHED_FIFO, FIFO, "fifo") \
  _(SCHED_RR, RR, "rr")

typedef enum
{
#define _(v,f,s) SCHED_POLICY_##f = v,
  foreach_sched_policy
#undef _
  SCHED_POLICY_N,
} sched_policy_t;

typedef struct
{
  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
					  unsigned cpu_id);
  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 cpu);
} vlib_thread_callbacks_t;

typedef struct
{
  /* Linked list of registrations, built by constructors */
  vlib_thread_registration_t *next;

  /* Vector of registrations, w/ non-data-structure clones at the top */
  vlib_thread_registration_t **registrations;

  uword *thread_registrations_by_name;

  vlib_worker_thread_t *worker_threads;

  /*
   * Launch all threads as pthreads,
   * not eal_rte_launch (strict affinity) threads
   */
  int use_pthreads;

  /* Number of vlib_main / vnet_main clones */
  u32 n_vlib_mains;

  /* Number of thread stacks to create */
  u32 n_thread_stacks;

  /* Number of pthreads */
  u32 n_pthreads;

  /* Number of threads */
  u32 n_threads;

  /* Number of cores to skip, must match the core mask */
  u32 skip_cores;

  /* Thread prefix name */
  u8 *thread_prefix;

  /* main thread lcore */
  u32 main_lcore;

  /* Bitmap of available CPU cores */
  uword *cpu_core_bitmap;

  /* Bitmap of available CPU sockets (NUMA nodes) */
  uword *cpu_socket_bitmap;

  /* Worker handoff queues */
  vlib_frame_queue_main_t *frame_queue_mains;

  /* worker thread initialization barrier */
  volatile u32 worker_thread_release;

  /* scheduling policy */
  u32 sched_policy;

  /* scheduling policy priority */
  u32 sched_priority;

  /* callbacks */
  vlib_thread_callbacks_t cb;
  int extern_thread_mgmt;

  /* NUMA-bound heap size */
  uword numa_heap_size;

} vlib_thread_main_t;

extern vlib_thread_main_t vlib_thread_main;

#include <vlib/global_funcs.h>

#define VLIB_REGISTER_THREAD(x,...)                     \
  __VA_ARGS__ vlib_thread_registration_t x;             \
static void __vlib_add_thread_registration_##x (void)   \
  __attribute__((__constructor__)) ;                    \
static void __vlib_add_thread_registration_##x (void)   \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  x.next = tm->next;                                    \
  tm->next = &x;                                        \
}                                                       \
static void __vlib_rm_thread_registration_##x (void)    \
  __attribute__((__destructor__)) ;                     \
static void __vlib_rm_thread_registration_##x (void)    \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
}                                                       \
__VA_ARGS__ vlib_thread_registration_t x

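/*
 * Example registration, a sketch modeled on in-tree worker registrations
 * (my_thread_reg, "my-thread" and my_thread_fn are hypothetical names):
 *
 *   VLIB_REGISTER_THREAD (my_thread_reg, static) = {
 *     .name = "my-thread",
 *     .short_name = "my",
 *     .function = my_thread_fn,
 *   };
 */
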
always_inline u32
vlib_num_workers ()
{
  return vlib_thread_main.n_vlib_mains - 1;
}

always_inline u32
vlib_get_worker_thread_index (u32 worker_index)
{
  return worker_index + 1;
}

always_inline u32
vlib_get_worker_index (u32 thread_index)
{
  return thread_index - 1;
}

always_inline u32
vlib_get_current_worker_index ()
{
  return vlib_get_thread_index () - 1;
}

static inline void
vlib_worker_thread_barrier_check (void)
{
  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
    {
      vlib_global_main_t *vgm = vlib_get_global_main ();
      vlib_main_t *vm = vlib_get_main ();
      u32 thread_index = vm->thread_index;
      f64 t = vlib_time_now (vm);

      if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
	clib_call_callbacks (vm->barrier_perf_callbacks, vm,
			     vm->clib_time.last_cpu_time, 0 /* enter */ );

      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
	{
	  vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
	  /* *INDENT-OFF* */
	  ELOG_TYPE_DECLARE (e) = {
	    .format = "barrier-wait-thread-%d",
	    .format_args = "i4",
	  };
	  /* *INDENT-ON* */

	  struct
	  {
	    u32 thread_index;
	  } __clib_packed *ed;

	  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
	  ed->thread_index = thread_index;
	}

      if (CLIB_DEBUG > 0)
	{
	  vm = vlib_get_main ();
	  vm->parked_at_barrier = 1;
	}
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
      while (*vlib_worker_threads->wait_at_barrier)
	;

      /*
       * Recompute the offset from thread-0 time.
       * Note that vlib_time_now adds vm->time_offset, so
       * clear it first. Save the resulting idea of "now", to
       * see how well we're doing. See show_clock_command_fn(...)
       */
      {
	f64 now;
	vm->time_offset = 0.0;
	now = vlib_time_now (vm);
	vm->time_offset = vgm->vlib_mains[0]->time_last_barrier_release - now;
	vm->time_last_barrier_release = vlib_time_now (vm);
      }

      if (CLIB_DEBUG > 0)
	vm->parked_at_barrier = 0;
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);

      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
	{
	  if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
	    {
	      t = vlib_time_now (vm) - t;
	      vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
	      /* *INDENT-OFF* */
	      ELOG_TYPE_DECLARE (e) = {
		.format = "barrier-refork-thread-%d",
		.format_args = "i4",
	      };
	      /* *INDENT-ON* */

	      struct
	      {
		u32 thread_index;
	      } __clib_packed *ed;

	      ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
				    w->elog_track);
	      ed->thread_index = thread_index;
	    }

	  vlib_worker_thread_node_refork ();
	  clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
				 -1);
	  while (*vlib_worker_threads->node_reforks_required)
	    ;
	}
      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
	{
	  t = vlib_time_now (vm) - t;
	  vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
	  /* *INDENT-OFF* */
	  ELOG_TYPE_DECLARE (e) = {
	    .format = "barrier-released-thread-%d: %dus",
	    .format_args = "i4i4",
	  };
	  /* *INDENT-ON* */

	  struct
	  {
	    u32 thread_index;
	    u32 duration;
	  } __clib_packed *ed;

	  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
	  ed->thread_index = thread_index;
	  ed->duration = (int) (1000000.0 * t);
	}

      if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
	clib_call_callbacks (vm->barrier_perf_callbacks, vm,
			     vm->clib_time.last_cpu_time, 1 /* leave */ );
    }
}

always_inline vlib_main_t *
vlib_get_worker_vlib_main (u32 worker_index)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
  vm = vlib_get_main_by_index (worker_index + 1);
  ASSERT (vm);
  return vm;
}

static inline u8
vlib_thread_is_main_w_barrier (void)
{
  return (!vlib_num_workers ()
	  || ((vlib_get_thread_index () == 0
	       && vlib_worker_threads->wait_at_barrier[0])));
}

static inline void
vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER ();
  hf->valid = 1;
}

static inline vlib_frame_queue_elt_t *
vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  u64 new_tail;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  new_tail = clib_atomic_add_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
    vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}

static inline vlib_frame_queue_t *
is_vlib_frame_queue_congested (u32 frame_queue_index,
			       u32 index,
			       u32 queue_hi_thresh,
			       vlib_frame_queue_t **
			       handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);

  fq = handoff_queue_by_worker_index[index];
  if (fq != (vlib_frame_queue_t *) (~0))
    return fq;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
    {
      /* a valid entry in the array will indicate the queue has reached
       * the specified threshold and is congested
       */
      handoff_queue_by_worker_index[index] = fq;
      fq->enqueue_full_events++;
      return fq;
    }

  return NULL;
}

static inline vlib_frame_queue_elt_t *
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
				   u32 vlib_worker_index,
				   vlib_frame_queue_elt_t **
				   handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
    return handoff_queue_elt_by_worker_index[vlib_worker_index];

  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);

  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

  return elt;
}

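/*
 * Typical handoff pattern, a sketch only (fq_index, worker_index, bi and
 * handoff_elts are hypothetical caller-side variables): acquire an element
 * for the target worker, append buffer indices, then publish the element
 * with vlib_put_frame_queue_elt() and drop the cached pointer once it is
 * full (or when the input frame is exhausted):
 *
 *   elt = vlib_get_worker_handoff_queue_elt (fq_index, worker_index,
 *                                            handoff_elts);
 *   elt->buffer_index[elt->n_vectors++] = bi;
 *   if (elt->n_vectors == VLIB_FRAME_SIZE)
 *     {
 *       vlib_put_frame_queue_elt (elt);
 *       handoff_elts[worker_index] = 0;
 *     }
 */
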
u8 *vlib_thread_stack_init (uword thread_index);
int vlib_thread_cb_register (struct vlib_main_t *vm,
			     vlib_thread_callbacks_t * cb);
extern void *rpc_call_main_thread_cb_fn;

void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
				     args);
void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id);
vlib_thread_main_t *vlib_get_thread_main_not_inline (void);

#endif /* included_vlib_threads_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */