/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>

#include <vlib/unix/unix.h>

/* Actually allocate a few extra slots of vector data to support
   speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4)

always_inline u32
vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes)
{
  u32 n_bytes;

  /* Make room for vlib_frame_t plus scalar arguments. */
  n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes);

  /* Make room for vector arguments.
     Allocate a few extra slots of vector data to support
     speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_EXTRA 4
  n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes;

  /* Magic number is first 32bit number after vector data.
     Used to make sure that vector data is never overrun. */
#define VLIB_FRAME_MAGIC (0xabadc0ed)
  n_bytes += sizeof (u32);

  /* Pad to cache line. */
  n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES);

  return n_bytes;
}

always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  void *p = f;

  p += vlib_frame_vector_byte_offset (node->scalar_size);

  p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size;

  return p;
}

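/*
 * Frames are managed per (scalar_size, vector_size) bucket.  When
 * VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES is defined, a hash maps each size
 * pair to its own vlib_frame_size_t free list; otherwise every node is
 * required to use the single frame-size slot 0.
 */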
static inline vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
                     u32 n_scalar_bytes, u32 n_vector_bytes)
{
#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
  uword key = (n_scalar_bytes << 16) | n_vector_bytes;
  uword *p, i;

  p = hash_get (nm->frame_size_hash, key);
  if (p)
    i = p[0];
  else
    {
      i = vec_len (nm->frame_sizes);
      vec_validate (nm->frame_sizes, i);
      hash_set (nm->frame_size_hash, key, i);
    }

  return vec_elt_at_index (nm->frame_sizes, i);
#else
  ASSERT (vlib_frame_bytes (n_scalar_bytes, n_vector_bytes)
          == (vlib_frame_bytes (0, 4)));
  return vec_elt_at_index (nm->frame_sizes, 0);
#endif
}

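/*
 * Allocate a frame suitable for enqueuing to the given node: reuse a frame
 * from the per-size free list when one is available, otherwise grab
 * cache-line aligned memory.  Debug images poison the frame with 0xfe, and
 * a magic word is planted just past the vector data so that overruns can be
 * caught later by validate_frame_magic().
 */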
static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
                          u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n, scalar_size, vector_size;

  ASSERT (vm == vlib_get_main ());

  to_node = vlib_get_node (vm, to_node_index);

  scalar_size = to_node->scalar_size;
  vector_size = to_node->vector_size;

  fs = get_frame_size_info (nm, scalar_size, vector_size);
  n = vlib_frame_bytes (scalar_size, vector_size);
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      _vec_len (fs->free_frames) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_size = scalar_size;
  f->vector_size = vector_size;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns a pointer to the allocated frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
                  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
                                   /* frame_flags */ 0);
}

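/*
 * vlib_get_frame_to_node() / vlib_put_frame_to_node() are the public pair
 * for handing a frame of buffer indices directly to an arbitrary node.  A
 * minimal usage sketch, for illustration only ('target_node_index' and 'bi'
 * are assumed to be a valid node index and buffer index):
 *
 *   vlib_frame_t *f = vlib_get_frame_to_node (vm, target_node_index);
 *   u32 *to_next = vlib_frame_vector_args (f);
 *   to_next[0] = bi;
 *   f->n_vectors = 1;
 *   vlib_put_frame_to_node (vm, target_node_index, f);
 *
 * The frame is flagged VLIB_FRAME_FREE_AFTER_DISPATCH, so it is returned to
 * the allocator once the target node has run.
 */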
vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
                                              /* frame_flags */
                                              VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

static inline void
vlib_validate_frame_indices (vlib_frame_t * f)
{
  if (CLIB_DEBUG > 0)
    {
      int i;
      u32 *from = vlib_frame_vector_args (f);

      /* Check for bad buffer index values */
      for (i = 0; i < f->n_vectors; i++)
        {
          if (from[i] == 0)
            {
              clib_warning ("BUG: buffer index 0 at index %d", i);
              ASSERT (0);
            }
          else if (from[i] == 0xfefefefe)
            {
              clib_warning ("BUG: frame poison pattern at index %d", i);
              ASSERT (0);
            }
        }
    }
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  ASSERT (vm == vlib_get_main ());

  vlib_validate_frame_indices (f);

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (vm == vlib_get_main ());
  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
    {
      u32 n_alloc = fs->n_alloc_frames;
      u32 n_free = vec_len (fs->free_frames);

      if (n_alloc + n_free > 0)
        vlib_cli_output (vm, "%=6d%=12d%=12d",
                         fs - nm->frame_sizes, n_alloc, n_free);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
                                  vlib_node_runtime_t * node_runtime,
                                  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal, input & process nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
          || node->type == VLIB_NODE_TYPE_INPUT
          || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
        vlib_node_get_next_frame (vm,
                                  next_node->owner_node_index,
                                  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
        {
          vlib_pending_frame_t *p;
          if (next_frame->frame != NULL)
            {
              vec_foreach (p, nm->pending_frames)
                {
                  if (p->frame == next_frame->frame)
                    {
                      p->next_frame_index =
                        next_frame - vm->node_main.next_frames;
                    }
                }
            }
        }
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
                      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

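/*
 * Worker routine behind the vlib_get_next_frame() macro: find (or allocate)
 * the frame currently attached to (node, next_index), take enqueue
 * ownership if another node held it, and replace the frame when it is full
 * or marked VLIB_FRAME_NO_APPEND.
 */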
vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
        {
          vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
          f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
        }

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
                            vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
                              vlib_node_runtime_t * rt,
                              u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);

  vlib_validate_frame_indices (f);

  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                              nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
        {
          clib_warning ("%v", msg);
          ASSERT (0);
        }
      vec_free (msg);
    }
}

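/*
 * vlib_put_next_frame() closes out an enqueue started with the
 * vlib_get_next_frame() macro.  Sketch of the conventional pattern inside a
 * node dispatch function, shown for illustration only:
 *
 *   u32 *to_next, n_left_to_next;
 *   vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 *   ... fill to_next[0..] with buffer indices, decrementing
 *       n_left_to_next for each one enqueued ...
 *   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 *
 * n_vectors_left is the number of unused slots remaining; below it is
 * converted into the number of vectors actually enqueued, and the frame is
 * queued to nm->pending_frames for dispatch.
 */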
void
vlib_put_next_frame (vlib_main_t * vm,
                     vlib_node_runtime_t * r,
                     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there.  Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
        {
          __attribute__ ((unused)) vlib_node_t *node;
          vlib_node_t *next_node;
          vlib_node_runtime_t *next_runtime;

          node = vlib_get_node (vm, r->node_index);
          next_node = vlib_get_next_node (vm, r->node_index, next_index);
          next_runtime = vlib_node_get_runtime (vm, next_node->index);

          vec_add2 (nm->pending_frames, p, 1);

          p->frame = nf->frame;
          p->node_runtime_index = nf->node_runtime_index;
          p->next_frame_index = nf - nm->next_frames;
          nf->flags |= VLIB_FRAME_PENDING;
          f->frame_flags |= VLIB_FRAME_PENDING;

          /*
           * If we're going to dispatch this frame on another thread,
           * force allocation of a new frame. Otherwise, we create
           * a dangling frame reference. Each thread has its own copy of
           * the next_frames vector.
           */
          if (0 && r->thread_index != next_runtime->thread_index)
            {
              nf->frame = NULL;
              nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
            }
        }

      /* Copy trace flag from next_frame and from runtime. */
      nf->flags |= (nf->flags & VLIB_NODE_FLAG_TRACE)
        | (r->flags & VLIB_NODE_FLAG_TRACE);

      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;
      if (PREDICT_FALSE (v1 < v0))
        {
          vlib_node_t *node = vlib_get_node (vm, r->node_index);
          vec_elt (node->n_vectors_by_next_node, next_index) += v0;
        }
    }
}

/* Sync up runtime (32 bit counters) and main node stats (64 bit counters). */
void
vlib_node_runtime_sync_stats_node (vlib_node_t *n, vlib_node_runtime_t *r,
                                   uword n_calls, uword n_vectors,
                                   uword n_clocks)
{
  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

void
vlib_node_runtime_sync_stats (vlib_main_t *vm, vlib_node_runtime_t *r,
                              uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);
  vlib_node_runtime_sync_stats_node (n, r, n_calls, n_vectors, n_clocks);
}

always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
                         vlib_process_t * p,
                         uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != vlib_get_first_main ())
        return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt =
      vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
                        n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure. */
  {
    vlib_next_frame_t *nf;
    uword i;
    for (i = 0; i < rt->n_next_nodes; i++)
      {
        nf = vlib_node_runtime_get_next_frame (vm, rt, i);
        vec_elt (n->n_vectors_by_next_node, i) +=
          nf->vectors_since_last_overflow;
        nf->vectors_since_last_overflow = 0;
      }
  }
}

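/*
 * Add one call's worth of statistics to the runtime's 32-bit counters.  If
 * any counter would wrap, roll the accumulated values into the node's
 * 64-bit totals via vlib_node_runtime_sync_stats().  Returns the value
 * produced by vlib_node_runtime_update_main_loop_vector_stats().
 */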
always_inline u32
vlib_node_runtime_update_stats (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                uword n_calls,
                                uword n_vectors, uword n_clocks)
{
  u32 ca0, ca1, v0, v1, cl0, cl1, r;

  cl0 = cl1 = node->clocks_since_last_overflow;
  ca0 = ca1 = node->calls_since_last_overflow;
  v0 = v1 = node->vectors_since_last_overflow;

  ca1 = ca0 + n_calls;
  v1 = v0 + n_vectors;
  cl1 = cl0 + n_clocks;

  node->calls_since_last_overflow = ca1;
  node->clocks_since_last_overflow = cl1;
  node->vectors_since_last_overflow = v1;

  node->max_clock_n = node->max_clock > n_clocks ?
    node->max_clock_n : n_vectors;
  node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks;

  r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);

  if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
    {
      node->calls_since_last_overflow = ca0;
      node->clocks_since_last_overflow = cl0;
      node->vectors_since_last_overflow = v0;

      vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
    }

  return r;
}

always_inline void
vlib_process_update_stats (vlib_main_t * vm,
                           vlib_process_t * p,
                           uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_update_stats (vm, &p->node_runtime,
                                  n_calls, n_vectors, n_clocks);
}

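/*
 * CLI front ends for the event logger: clear, save, stop, restart, resize
 * and show commands, all operating on the global elog buffer.
 */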
static clib_error_t *
vlib_cli_elog_clear (vlib_main_t * vm,
                     unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_reset_buffer (&vlib_global_main.elog_main);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_clear_cli, static) = {
  .path = "event-logger clear",
  .short_help = "Clear the event log",
  .function = vlib_cli_elog_clear,
};
/* *INDENT-ON* */

#ifdef CLIB_UNIX
static clib_error_t *
elog_save_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  char *file, *chroot_file;
  clib_error_t *error = 0;

  if (!unformat (input, "%s", &file))
    {
      vlib_cli_output (vm, "expected file name, got `%U'",
                       format_unformat_error, input);
      return 0;
    }

  /* It's fairly hard to get "../oopsie" through unformat; just in case */
  if (strstr (file, "..") || index (file, '/'))
    {
      vlib_cli_output (vm, "illegal characters in filename '%s'", file);
      return 0;
    }

  chroot_file = (char *) format (0, "/tmp/%s%c", file, 0);

  vec_free (file);

  vlib_cli_output (vm, "Saving %wd of %wd events to %s",
                   elog_n_events_in_buffer (em),
                   elog_buffer_capacity (em), chroot_file);

  vlib_worker_thread_barrier_sync (vm);
  error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
  vlib_worker_thread_barrier_release (vm);
  vec_free (chroot_file);
  return error;
}

void
vlib_post_mortem_dump (void)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();

  for (int i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
    (vgm->post_mortem_callbacks[i]) ();
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
  .path = "event-logger save",
  .short_help = "event-logger save <filename> (saves log in /tmp/<filename>)",
  .function = elog_save_buffer,
};
/* *INDENT-ON* */

static clib_error_t *
elog_stop (vlib_main_t * vm,
           unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = em->n_total_events;

  vlib_cli_output (vm, "Stopped the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_stop_cli, static) = {
  .path = "event-logger stop",
  .short_help = "Stop the event-logger",
  .function = elog_stop,
};
/* *INDENT-ON* */

static clib_error_t *
elog_restart (vlib_main_t * vm,
              unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = ~0;

  vlib_cli_output (vm, "Restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_restart_cli, static) = {
  .path = "event-logger restart",
  .short_help = "Restart the event-logger",
  .function = elog_restart,
};
/* *INDENT-ON* */

static clib_error_t *
elog_resize_command_fn (vlib_main_t * vm,
                        unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  u32 tmp;

  /* Stop the parade */
  elog_reset_buffer (em);

  if (unformat (input, "%d", &tmp))
    {
      elog_alloc (em, tmp);
      em->n_total_events_disable_limit = ~0;
    }
  else
    return clib_error_return (0, "Must specify how many events in the ring");

  vlib_cli_output (vm, "Resized ring and restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
  .path = "event-logger resize",
  .short_help = "event-logger resize <nnn>",
  .function = elog_resize_command_fn,
};
/* *INDENT-ON* */

#endif /* CLIB_UNIX */

static void
elog_show_buffer_internal (vlib_main_t * vm, u32 n_events_to_show)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  elog_event_t *e, *es;
  f64 dt;

  /* Show events in VLIB time since log clock starts after VLIB clock. */
  dt = (em->init_time.cpu - vm->clib_time.init_cpu_time)
    * vm->clib_time.seconds_per_clock;

  es = elog_peek_events (em);
  vlib_cli_output (vm, "%d of %d events in buffer, logger %s", vec_len (es),
                   em->event_ring_size,
                   em->n_total_events < em->n_total_events_disable_limit ?
                   "running" : "stopped");
  vec_foreach (e, es)
    {
      vlib_cli_output (vm, "%18.9f: %U",
                       e->time + dt, format_elog_event, em, e);
      n_events_to_show--;
      if (n_events_to_show == 0)
        break;
    }
  vec_free (es);
}

static clib_error_t *
elog_show_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 n_events_to_show;
  clib_error_t *error = 0;

  n_events_to_show = 250;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%d", &n_events_to_show))
        ;
      else if (unformat (input, "all"))
        n_events_to_show = ~0;
      else
        return unformat_parse_error (input);
    }
  elog_show_buffer_internal (vm, n_events_to_show);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_show_cli, static) = {
  .path = "show event-logger",
  .short_help = "Show event logger info",
  .function = elog_show_buffer,
};
/* *INDENT-ON* */

void
vlib_gdb_show_event_log (void)
{
  elog_show_buffer_internal (vlib_get_main (), (u32) ~0);
}

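/*
 * Emit a node call/return event into the event log, but only when graph
 * dispatch tracing or single-node circuit tracing has been enabled and the
 * frame actually carries vectors.
 */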
static inline void
vlib_elog_main_loop_event (vlib_main_t * vm,
                           u32 node_index,
                           u64 time, u32 n_vectors, u32 is_return)
{
  vlib_main_t *evm = vlib_get_first_main ();
  elog_main_t *em = vlib_get_elog_main ();
  int enabled = evm->elog_trace_graph_dispatch |
    evm->elog_trace_graph_circuit;

  if (PREDICT_FALSE (enabled && n_vectors))
    {
      if (PREDICT_FALSE (!elog_is_enabled (em)))
        {
          evm->elog_trace_graph_dispatch = 0;
          evm->elog_trace_graph_circuit = 0;
          return;
        }
      if (PREDICT_TRUE
          (evm->elog_trace_graph_dispatch ||
           (evm->elog_trace_graph_circuit &&
            node_index == evm->elog_trace_graph_circuit_node_index)))
        {
          elog_track (em,
                      /* event type */
                      vec_elt_at_index (is_return
                                        ? evm->node_return_elog_event_types
                                        : evm->node_call_elog_event_types,
                                        node_index),
                      /* track */
                      (vm->thread_index ?
                       &vlib_worker_threads[vm->thread_index].elog_track
                       : &em->default_track),
                      /* data to log */ n_vectors);
        }
    }
}

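/*
 * Buffer trajectory tracing: record the node index in the buffer's
 * trajectory array.  Compiled in only when VLIB_BUFFER_TRACE_TRAJECTORY is
 * configured to a non-zero value.
 */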
static inline void
add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
{
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
  if (PREDICT_FALSE (b->trajectory_nb >= VLIB_BUFFER_TRACE_TRAJECTORY_MAX))
    return;
  b->trajectory_trace[b->trajectory_nb] = node_index;
  b->trajectory_nb++;
#endif
}

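/*
 * Dispatch one node: run its function on the given frame (if any), log
 * elog/perf events around the call, update per-node statistics, and, for
 * adaptive-mode input nodes, flip between interrupt and polling state based
 * on the measured vector rate.
 */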
static_always_inline u64
dispatch_node (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_node_type_t type,
               vlib_node_state_t dispatch_state,
               vlib_frame_t * frame, u64 last_time_stamp)
{
  uword n, v;
  u64 t;
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;

  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *n = vlib_get_node (vm, node->node_index);
      ASSERT (n->type == type);
    }

  /* Only non-internal nodes may be disabled. */
  if (type != VLIB_NODE_TYPE_INTERNAL && node->state != dispatch_state)
    {
      ASSERT (type != VLIB_NODE_TYPE_INTERNAL);
      return last_time_stamp;
    }

  if ((type == VLIB_NODE_TYPE_PRE_INPUT || type == VLIB_NODE_TYPE_INPUT)
      && dispatch_state != VLIB_NODE_STATE_INTERRUPT)
    {
      u32 c = node->input_main_loops_per_call;
      /* Only call node when count reaches zero. */
      if (c)
        {
          node->input_main_loops_per_call = c - 1;
          return last_time_stamp;
        }
    }

  /* Speculatively prefetch next frames. */
  if (node->n_next_nodes > 0)
    {
      nf = vec_elt_at_index (nm->next_frames, node->next_frame_index);
      CLIB_PREFETCH (nf, 4 * sizeof (nf[0]), WRITE);
    }

  vm->cpu_time_last_node_dispatch = last_time_stamp;

  vlib_elog_main_loop_event (vm, node->node_index,
                             last_time_stamp, frame ? frame->n_vectors : 0,
                             /* is_after */ 0);

  vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
                                  VLIB_NODE_RUNTIME_PERF_BEFORE);

  /*
   * Turn this on if you run into
   * "bad monkey" contexts, and you want to know exactly
   * which nodes they've visited... See ixge.c...
   */
  if (VLIB_BUFFER_TRACE_TRAJECTORY && frame)
    {
      int i;
      u32 *from;
      from = vlib_frame_vector_args (frame);
      for (i = 0; i < frame->n_vectors; i++)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
          add_trajectory_trace (b, node->node_index);
        }
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }
  else
    {
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }

  t = clib_cpu_time_now ();

  vlib_node_runtime_perf_counter (vm, node, frame, n, t,
                                  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_elog_main_loop_event (vm, node->node_index, t, n, 1 /* is_after */ );

  vm->main_loop_vectors_processed += n;
  vm->main_loop_nodes_processed += n > 0;

  v = vlib_node_runtime_update_stats (vm, node,
                                      /* n_calls */ 1,
                                      /* n_vectors */ n,
                                      /* n_clocks */ t - last_time_stamp);

  /* When in adaptive mode and vector rate crosses threshold switch to
     polling mode and vice versa. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE))
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (e) =
        {
          .function = (char *) __FUNCTION__,
          .format = "%s vector length %d, switching to %s",
          .format_args = "T4i4t4",
          .n_enum_strings = 2,
          .enum_strings = {
            "interrupt", "polling",
          },
        };
      /* *INDENT-ON* */
      struct
      {
        u32 node_name, vector_length, is_polling;
      } *ed;

      if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
           && v >= nm->polling_threshold_vector_length) &&
          !(node->flags &
            VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          n->state = VLIB_NODE_STATE_POLLING;
          node->state = VLIB_NODE_STATE_POLLING;
          node->flags &=
            ~VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
          node->flags |= VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] -= 1;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] += 1;

          if (PREDICT_FALSE (
                vlib_get_first_main ()->elog_trace_graph_dispatch))
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;

              ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                    w->elog_track);
              ed->node_name = n->name_elog_string;
              ed->vector_length = v;
              ed->is_polling = 1;
            }
        }
      else if (dispatch_state == VLIB_NODE_STATE_POLLING
               && v <= nm->interrupt_threshold_vector_length)
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          if (node->flags &
              VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
            {
              /* Switch to interrupt mode after dispatch in polling one more time.
                 This allows driver to re-enable interrupts. */
              n->state = VLIB_NODE_STATE_INTERRUPT;
              node->state = VLIB_NODE_STATE_INTERRUPT;
              node->flags &=
                ~VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] -= 1;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] += 1;
            }
          else
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;
              node->flags |=
                VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
              if (PREDICT_FALSE (
                    vlib_get_first_main ()->elog_trace_graph_dispatch))
                {
                  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                        w->elog_track);
                  ed->node_name = n->name_elog_string;
                  ed->vector_length = v;
                  ed->is_polling = 0;
                }
            }
        }
    }

  return t;
}

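/*
 * Dispatch one entry from nm->pending_frames: detach the frame from its
 * next-frame slot so new enqueues get a fresh frame, run the target internal
 * node on it, then either restore the frame for reuse or free it.
 */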
static u64
dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
                       u64 last_time_stamp)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_t *f;
  vlib_next_frame_t *nf, nf_placeholder;
  vlib_node_runtime_t *n;
  vlib_frame_t *restore_frame;
  vlib_pending_frame_t *p;

  /* See comment below about dangling references to nm->pending_frames */
  p = nm->pending_frames + pending_frame_index;

  n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                        p->node_runtime_index);

  f = vlib_get_frame (vm, p->frame);
  if (p->next_frame_index == VLIB_PENDING_FRAME_NO_NEXT_FRAME)
    {
      /* No next frame: so use placeholder on stack. */
      nf = &nf_placeholder;
      nf->flags = f->frame_flags & VLIB_NODE_FLAG_TRACE;
      nf->frame = NULL;
    }
  else
    nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* Force allocation of new frame while current frame is being
     dispatched. */
  restore_frame = NULL;
  if (nf->frame == p->frame)
    {
      nf->frame = NULL;
      nf->flags &= ~VLIB_FRAME_IS_ALLOCATED;
      if (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH))
        restore_frame = p->frame;
    }

  /* Frame must be pending. */
  ASSERT (f->frame_flags & VLIB_FRAME_PENDING);
  ASSERT (f->n_vectors > 0);

  /* Copy trace flag from next frame to node.
     Trace flag indicates that at least one vector in the dispatched
     frame is traced. */
  n->flags &= ~VLIB_NODE_FLAG_TRACE;
  n->flags |= (nf->flags & VLIB_FRAME_TRACE) ? VLIB_NODE_FLAG_TRACE : 0;
  nf->flags &= ~VLIB_FRAME_TRACE;

  last_time_stamp = dispatch_node (vm, n,
                                   VLIB_NODE_TYPE_INTERNAL,
                                   VLIB_NODE_STATE_POLLING,
                                   f, last_time_stamp);
  /* Internal node vector-rate accounting, for summary stats */
  vm->internal_node_vectors += f->n_vectors;
  vm->internal_node_calls++;
  vm->internal_node_last_vectors_per_main_loop =
    (f->n_vectors > vm->internal_node_last_vectors_per_main_loop) ?
    f->n_vectors : vm->internal_node_last_vectors_per_main_loop;

  f->frame_flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_NO_APPEND);

  /* Frame is ready to be used again, so restore it. */
  if (restore_frame != NULL)
    {
      /*
       * We mustn't restore a frame that is flagged to be freed. This
       * shouldn't happen since frames to be freed post dispatch are
       * those used when the to-node frame becomes full i.e. they form a
       * sort of queue of frames to a single node. If we get here then
       * the to-node frame and the pending frame *were* the same, and so
       * we removed the to-node frame. Therefore this frame is no
       * longer part of the queue for that node and hence it cannot be
       * its overspill.
       */
      ASSERT (!(f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH));

      /*
       * NB: dispatching node n can result in the creation and scheduling
       * of new frames, and hence in the reallocation of nm->pending_frames.
       * Recompute p, or no supper. This was broken for more than 10 years.
       */
      p = nm->pending_frames + pending_frame_index;

      /*
       * p->next_frame_index can change during node dispatch if node
       * function decides to change graph hook up.
       */
      nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;

      if (NULL == nf->frame)
        {
          /* No new frame has been assigned to this node, use the saved one. */
          nf->frame = restore_frame;
          f->n_vectors = 0;
        }
      else
        {
          /* The node has gained a frame, implying packets from the current
             frame were re-queued to this same node. We don't need the saved
             one anymore. */
          vlib_frame_free (vm, n, f);
        }
    }
  else
    {
      if (f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH)
        {
          ASSERT (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH));
          vlib_frame_free (vm, n, f);
        }
    }

  return last_time_stamp;
}

always_inline uword
vlib_process_stack_is_valid (vlib_process_t * p)
{
  return p->stack[0] == VLIB_PROCESS_STACK_MAGIC;
}

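/*
 * Process nodes run as cooperative coroutines: each has its own stack and
 * is entered, suspended and resumed with clib_setjmp/clib_longjmp plus the
 * stack-switch helpers used below.
 */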
typedef struct
{
  vlib_main_t *vm;
  vlib_process_t *process;
  vlib_frame_t *frame;
} vlib_process_bootstrap_args_t;

/* Called in process stack. */
static uword
vlib_process_bootstrap (uword _a)
{
  vlib_process_bootstrap_args_t *a;
  vlib_main_t *vm;
  vlib_node_runtime_t *node;
  vlib_frame_t *f;
  vlib_process_t *p;
  uword n;

  a = uword_to_pointer (_a, vlib_process_bootstrap_args_t *);

  vm = a->vm;
  p = a->process;
  vlib_process_finish_switch_stack (vm);

  f = a->frame;
  node = &p->node_runtime;

  n = node->function (vm, node, f);

  ASSERT (vlib_process_stack_is_valid (p));

  vlib_process_start_switch_stack (vm, 0);
  clib_longjmp (&p->return_longjmp, n);

  return n;
}

1294/* Called in main stack. */
1295static_always_inline uword
Dave Barach9b8ffd92016-07-08 08:13:45 -04001296vlib_process_startup (vlib_main_t * vm, vlib_process_t * p, vlib_frame_t * f)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001297{
1298 vlib_process_bootstrap_args_t a;
1299 uword r;
1300
1301 a.vm = vm;
1302 a.process = p;
1303 a.frame = f;
1304
1305 r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1306 if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
Damjan Marioncea46522020-05-21 16:47:05 +02001307 {
1308 vlib_process_start_switch_stack (vm, p);
1309 r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
1310 (void *) p->stack + (1 << p->log2_n_stack_bytes));
1311 }
1312 else
1313 vlib_process_finish_switch_stack (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001314
1315 return r;
1316}
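/*
 * For orientation, a hedged sketch of the suspend side (it lives outside
 * this file, so treat the exact code as an assumption): a process suspends
 * by parking its own context in p->resume_longjmp and then longjmp-ing back
 * through p->return_longjmp with the SUSPEND code, which is what the setjmp
 * in vlib_process_startup / vlib_process_resume observes:
 *
 *   if (clib_setjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_SUSPEND)
 *       == VLIB_PROCESS_RESUME_LONGJMP_SUSPEND)
 *     clib_longjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_SUSPEND);
 *   // ...execution continues here once vlib_process_resume() longjmps back
 */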
1317
1318static_always_inline uword
Damjan Marioncea46522020-05-21 16:47:05 +02001319vlib_process_resume (vlib_main_t * vm, vlib_process_t * p)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001320{
1321 uword r;
1322 p->flags &= ~(VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1323 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
1324 | VLIB_PROCESS_RESUME_PENDING);
1325 r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1326 if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
Damjan Marioncea46522020-05-21 16:47:05 +02001327 {
1328 vlib_process_start_switch_stack (vm, p);
1329 clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
1330 }
1331 else
1332 vlib_process_finish_switch_stack (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001333 return r;
1334}
1335
1336static u64
1337dispatch_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001338 vlib_process_t * p, vlib_frame_t * f, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001339{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001340 vlib_node_main_t *nm = &vm->node_main;
1341 vlib_node_runtime_t *node_runtime = &p->node_runtime;
1342 vlib_node_t *node = vlib_get_node (vm, node_runtime->node_index);
Florin Corasfd542f12018-05-16 19:28:24 -07001343 u32 old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001344 u64 t;
1345 uword n_vectors, is_suspend;
1346
1347 if (node->state != VLIB_NODE_STATE_POLLING
1348 || (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1349 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT)))
1350 return last_time_stamp;
1351
1352 p->flags |= VLIB_PROCESS_IS_RUNNING;
1353
1354 t = last_time_stamp;
1355 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1356 f ? f->n_vectors : 0, /* is_after */ 0);
1357
1358 /* Save away current process for suspend. */
Florin Corasfd542f12018-05-16 19:28:24 -07001359 old_process_index = nm->current_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001360 nm->current_process_index = node->runtime_index;
1361
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001362 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1363 VLIB_NODE_RUNTIME_PERF_BEFORE);
1364
Ed Warnickecb9cada2015-12-08 15:45:58 -07001365 n_vectors = vlib_process_startup (vm, p, f);
1366
Florin Corasfd542f12018-05-16 19:28:24 -07001367 nm->current_process_index = old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001368
1369 ASSERT (n_vectors != VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1370 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1371 if (is_suspend)
1372 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001373 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001374
1375 n_vectors = 0;
1376 pool_get (nm->suspended_process_frames, pf);
1377 pf->node_runtime_index = node->runtime_index;
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001378 pf->frame = f;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001379 pf->next_frame_index = ~0;
1380
1381 p->n_suspends += 1;
1382 p->suspended_process_frame_index = pf - nm->suspended_process_frames;
1383
1384 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001385 {
1386 TWT (tw_timer_wheel) * tw =
1387 (TWT (tw_timer_wheel) *) nm->timing_wheel;
1388 p->stop_timer_handle =
1389 TW (tw_timer_start) (tw,
1390 vlib_timing_wheel_data_set_suspended_process
1391 (node->runtime_index) /* [sic] pool index */ ,
1392 0 /* timer_id */ ,
1393 p->resume_clock_interval);
1394 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001395 }
1396 else
1397 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
1398
1399 t = clib_cpu_time_now ();
1400
Dave Barach9b8ffd92016-07-08 08:13:45 -04001401 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, is_suspend,
1402 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001403
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001404 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1405 VLIB_NODE_RUNTIME_PERF_AFTER);
1406
Ed Warnickecb9cada2015-12-08 15:45:58 -07001407 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001408 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001409 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001410 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001411
1412 return t;
1413}
1414
Dave Barach9b8ffd92016-07-08 08:13:45 -04001415void
1416vlib_start_process (vlib_main_t * vm, uword process_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001417{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001418 vlib_node_main_t *nm = &vm->node_main;
1419 vlib_process_t *p = vec_elt (nm->processes, process_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001420 dispatch_process (vm, p, /* frame */ 0, /* cpu_time_now */ 0);
1421}
1422
1423static u64
1424dispatch_suspended_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001425 uword process_index, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001426{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001427 vlib_node_main_t *nm = &vm->node_main;
1428 vlib_node_runtime_t *node_runtime;
1429 vlib_node_t *node;
1430 vlib_frame_t *f;
1431 vlib_process_t *p;
1432 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001433 u64 t, n_vectors, is_suspend;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001434
Ed Warnickecb9cada2015-12-08 15:45:58 -07001435 t = last_time_stamp;
1436
1437 p = vec_elt (nm->processes, process_index);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001438 if (PREDICT_FALSE (!(p->flags & VLIB_PROCESS_IS_RUNNING)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001439 return last_time_stamp;
1440
1441 ASSERT (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1442 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT));
1443
Florin Coras221d6f12018-11-07 20:46:38 -08001444 pf = pool_elt_at_index (nm->suspended_process_frames,
1445 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001446
1447 node_runtime = &p->node_runtime;
1448 node = vlib_get_node (vm, node_runtime->node_index);
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001449 f = pf->frame;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001450
Dave Barach9b8ffd92016-07-08 08:13:45 -04001451 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1452 f ? f->n_vectors : 0, /* is_after */ 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001453
1454 /* Save away current process for suspend. */
1455 nm->current_process_index = node->runtime_index;
1456
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001457 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1458 VLIB_NODE_RUNTIME_PERF_BEFORE);
1459
Damjan Marioncea46522020-05-21 16:47:05 +02001460 n_vectors = vlib_process_resume (vm, p);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001461 t = clib_cpu_time_now ();
1462
1463 nm->current_process_index = ~0;
1464
1465 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1466 if (is_suspend)
1467 {
1468 /* Suspend it again. */
1469 n_vectors = 0;
1470 p->n_suspends += 1;
1471 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001472 {
1473 p->stop_timer_handle =
1474 TW (tw_timer_start) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
1475 vlib_timing_wheel_data_set_suspended_process
1476 (node->runtime_index) /* [sic] pool index */ ,
1477 0 /* timer_id */ ,
1478 p->resume_clock_interval);
1479 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001480 }
1481 else
1482 {
1483 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
Florin Coras221d6f12018-11-07 20:46:38 -08001484 pool_put_index (nm->suspended_process_frames,
1485 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001486 p->suspended_process_frame_index = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001487 }
1488
1489 t = clib_cpu_time_now ();
Dave Barach9b8ffd92016-07-08 08:13:45 -04001490 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, !is_suspend,
1491 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001492
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001493 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1494 VLIB_NODE_RUNTIME_PERF_AFTER);
1495
Ed Warnickecb9cada2015-12-08 15:45:58 -07001496 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001497 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001498 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001499 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001500
1501 return t;
1502}
1503
Dave Barach2877eee2017-12-15 12:22:57 -05001504void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
1505void
1506vl_api_send_pending_rpc_requests (vlib_main_t * vm)
1507{
1508}
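/*
 * The weak definition above is only a link-time default: when another
 * object in the image supplies a strong vl_api_send_pending_rpc_requests()
 * (normally the binary-API layer), that definition wins and this empty stub
 * is never called.  Minimal sketch of the pattern, using a hypothetical
 * hook name:
 *
 *   // provider of the default:
 *   void hook (void) __attribute__ ((weak));
 *   void hook (void) { }                   // no-op unless overridden
 *
 *   // any other compilation unit:
 *   void hook (void) { do_real_work (); }  // strong symbol, overrides stub
 */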
1509
Damjan Marione9d52d52017-03-09 15:42:26 +01001510static_always_inline void
1511vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001512{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001513 vlib_node_main_t *nm = &vm->node_main;
Damjan Marione9d52d52017-03-09 15:42:26 +01001514 vlib_thread_main_t *tm = vlib_get_thread_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001515 uword i;
1516 u64 cpu_time_now;
Dave Barach000a0292020-02-17 17:07:12 -05001517 f64 now;
Damjan Marione9d52d52017-03-09 15:42:26 +01001518 vlib_frame_queue_main_t *fqm;
Dave Barach80965f52019-03-11 09:57:38 -04001519 u32 frame_queue_check_counter = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001520
1521 /* Initialize pending node vector. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001522 if (is_main)
1523 {
1524 vec_resize (nm->pending_frames, 32);
1525 _vec_len (nm->pending_frames) = 0;
1526 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001527
1528 /* Mark time of main loop start. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001529 if (is_main)
1530 {
1531 cpu_time_now = vm->clib_time.last_cpu_time;
1532 vm->cpu_time_main_loop_start = cpu_time_now;
1533 }
1534 else
1535 cpu_time_now = clib_cpu_time_now ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001536
Damjan Marion2c2b6402017-03-28 14:16:15 +02001537 /* Pre-allocate interrupt runtime indices and lock. */
Damjan Marion94100532020-11-06 23:25:57 +01001538 vec_alloc_aligned (nm->pending_interrupts, 1, CLIB_CACHE_LINE_BYTES);
Damjan Marion2c2b6402017-03-28 14:16:15 +02001539
1540 /* Pre-allocate expired nodes. */
Steven7312cc72017-03-15 21:18:55 -07001541 if (!nm->polling_threshold_vector_length)
1542 nm->polling_threshold_vector_length = 10;
1543 if (!nm->interrupt_threshold_vector_length)
1544 nm->interrupt_threshold_vector_length = 5;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001545
Damjan Marion29c0b332019-01-28 13:41:27 +01001546 vm->cpu_id = clib_get_current_cpu_id ();
1547 vm->numa_node = clib_get_current_numa_node ();
Florin Coras4c959952020-02-09 18:09:31 +00001548 os_set_numa_index (vm->numa_node);
Damjan Marion29c0b332019-01-28 13:41:27 +01001549
Ed Warnickecb9cada2015-12-08 15:45:58 -07001550 /* Start all processes. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001551 if (is_main)
1552 {
1553 uword i;
Dave Barachc602b382019-06-03 19:48:22 -04001554
1555 /*
1556 * Perform an initial barrier sync. Pays no attention to
1557 * the barrier sync hold-down timer scheme, which won't work
1558 * at this point in time.
1559 */
1560 vlib_worker_thread_initial_barrier_sync_and_release (vm);
1561
Stevenf3b53642017-05-01 14:03:02 -07001562 nm->current_process_index = ~0;
Damjan Marione9d52d52017-03-09 15:42:26 +01001563 for (i = 0; i < vec_len (nm->processes); i++)
1564 cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
1565 cpu_time_now);
1566 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001567
1568 while (1)
1569 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001570 vlib_node_runtime_t *n;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001571
Dave Barach2877eee2017-12-15 12:22:57 -05001572 if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
Dave Barachf6c68d72018-11-01 08:12:52 -04001573 {
1574 if (!is_main)
1575 vl_api_send_pending_rpc_requests (vm);
1576 }
Dave Barach2877eee2017-12-15 12:22:57 -05001577
Damjan Marione9d52d52017-03-09 15:42:26 +01001578 if (!is_main)
Damjan Marionf6e6c782020-09-17 09:54:07 +02001579 vlib_worker_thread_barrier_check ();
1580
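      /*
       * Handoff-queue polling with hysteresis: once vm->check_frame_queues
       * has been signalled, or whenever the dequeue below actually finds
       * work, this thread keeps polling its frame queues for another 100
       * main-loop iterations before going quiet again.
       */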
1581 if (PREDICT_FALSE (vm->check_frame_queues + frame_queue_check_counter))
Damjan Marione9d52d52017-03-09 15:42:26 +01001582 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001583 u32 processed = 0;
Damjan Marioneee099e2021-05-01 14:56:13 +02001584 vlib_frame_queue_dequeue_fn_t *fn =
1585 vlib_buffer_func_main.frame_queue_dequeue_fn;
Damjan Marionf6e6c782020-09-17 09:54:07 +02001586
1587 if (vm->check_frame_queues)
Dave Barach80965f52019-03-11 09:57:38 -04001588 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001589 frame_queue_check_counter = 100;
1590 vm->check_frame_queues = 0;
Dave Barach80965f52019-03-11 09:57:38 -04001591 }
Damjan Marionf6e6c782020-09-17 09:54:07 +02001592
1593 vec_foreach (fqm, tm->frame_queue_mains)
Damjan Marioneee099e2021-05-01 14:56:13 +02001594 processed += (fn) (vm, fqm);
Damjan Marionf6e6c782020-09-17 09:54:07 +02001595
1596 /* Keep polling if handoff work was found; otherwise count down. */
1597 if (processed)
1598 frame_queue_check_counter = 100;
1599 else
1600 frame_queue_check_counter--;
Damjan Marione9d52d52017-03-09 15:42:26 +01001601 }
1602
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001603 if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
1604 clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm,
1605 cpu_time_now);
1606
Ed Warnickecb9cada2015-12-08 15:45:58 -07001607 /* Process pre-input nodes. */
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001608 cpu_time_now = clib_cpu_time_now ();
Damjan Marionceab7882018-01-19 20:56:12 +01001609 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
1610 cpu_time_now = dispatch_node (vm, n,
1611 VLIB_NODE_TYPE_PRE_INPUT,
1612 VLIB_NODE_STATE_POLLING,
1613 /* frame */ 0,
1614 cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001615
1616 /* Next process input nodes. */
1617 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1618 cpu_time_now = dispatch_node (vm, n,
1619 VLIB_NODE_TYPE_INPUT,
1620 VLIB_NODE_STATE_POLLING,
1621 /* frame */ 0,
1622 cpu_time_now);
1623
Damjan Marione9d52d52017-03-09 15:42:26 +01001624 if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001625 vm->queue_signal_callback (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001626
Damjan Marion94100532020-11-06 23:25:57 +01001627 if (__atomic_load_n (nm->pending_interrupts, __ATOMIC_ACQUIRE))
Damjan Marion0b316302020-09-09 18:55:16 +02001628 {
Damjan Marion94100532020-11-06 23:25:57 +01001629 int int_num = -1;
1630 *nm->pending_interrupts = 0;
Dave Barachd47c5092018-01-19 13:09:20 -05001631
Damjan Marion94100532020-11-06 23:25:57 +01001632 while ((int_num =
1633 clib_interrupt_get_next (nm->interrupts, int_num)) != -1)
1634 {
1635 vlib_node_runtime_t *n;
1636 clib_interrupt_clear (nm->interrupts, int_num);
1637 n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1638 int_num);
1639 cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
1640 VLIB_NODE_STATE_INTERRUPT,
1641 /* frame */ 0, cpu_time_now);
1642 }
Damjan Marion1033b492020-06-03 12:20:41 +02001643 }
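      /*
       * Hedged usage sketch: an interrupt-mode input node is typically
       * woken (from a device RX interrupt handler or another thread) via
       * the node-funcs helper, which records the node in nm->interrupts and
       * sets the pending word tested above -- roughly:
       *
       *   vlib_node_set_interrupt_pending (vm, my_input_node_index);
       *
       * (helper name per vlib/node_funcs.h; the exact signature is an
       * assumption here, and my_input_node_index is a placeholder.)
       */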
1644
Dave Barache3248982018-08-14 13:47:58 -04001645 /* Input nodes may have added work to the pending vector.
1646 Process pending vector until there is nothing left.
1647 All pending vectors will be processed from input -> output. */
1648 for (i = 0; i < _vec_len (nm->pending_frames); i++)
1649 cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
1650 /* Reset pending vector for next iteration. */
1651 _vec_len (nm->pending_frames) = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001652
Damjan Marione9d52d52017-03-09 15:42:26 +01001653 if (is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001654 {
Dave Barach900cbad2019-01-31 19:12:51 -05001655 /* *INDENT-OFF* */
1656 ELOG_TYPE_DECLARE (es) =
1657 {
1658 .format = "process tw start",
1659 .format_args = "",
1660 };
1661 ELOG_TYPE_DECLARE (ee) =
1662 {
1663 .format = "process tw end: %d",
1664 .format_args = "i4",
1665 };
1666 /* *INDENT-ON* */
1667
1668 struct
1669 {
1670 int nready_procs;
1671 } *ed;
1672
Damjan Marione9d52d52017-03-09 15:42:26 +01001673 /* Check if process nodes have expired from timing wheel. */
Dave Barach5c20a012017-06-13 08:48:31 -04001674 ASSERT (nm->data_from_advancing_timing_wheel != 0);
1675
Dave Barach900cbad2019-01-31 19:12:51 -05001676 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1677 ed = ELOG_DATA (&vlib_global_main.elog_main, es);
1678
Dave Barach5c20a012017-06-13 08:48:31 -04001679 nm->data_from_advancing_timing_wheel =
1680 TW (tw_timer_expire_timers_vec)
1681 ((TWT (tw_timer_wheel) *) nm->timing_wheel, vlib_time_now (vm),
1682 nm->data_from_advancing_timing_wheel);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001683
Damjan Marione9d52d52017-03-09 15:42:26 +01001684 ASSERT (nm->data_from_advancing_timing_wheel != 0);
Dave Barach5c20a012017-06-13 08:48:31 -04001685
Dave Barach900cbad2019-01-31 19:12:51 -05001686 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1687 {
1688 ed = ELOG_DATA (&vlib_global_main.elog_main, ee);
1689 ed->nready_procs =
1690 _vec_len (nm->data_from_advancing_timing_wheel);
1691 }
1692
Damjan Marione9d52d52017-03-09 15:42:26 +01001693 if (PREDICT_FALSE
1694 (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001695 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001696 uword i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001697
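	      /*
	       * Each datum popped from the timing wheel encodes one of two
	       * things: a timed event, whose saved data is copied and
	       * delivered to the target process, or a suspended process
	       * whose sleep interval has expired and which is simply
	       * resumed; the tag is tested with
	       * vlib_timing_wheel_data_is_timed_event () below.
	       */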
Damjan Marione9d52d52017-03-09 15:42:26 +01001698 for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
1699 i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001700 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001701 u32 d = nm->data_from_advancing_timing_wheel[i];
1702 u32 di = vlib_timing_wheel_data_get_index (d);
1703
1704 if (vlib_timing_wheel_data_is_timed_event (d))
1705 {
1706 vlib_signal_timed_event_data_t *te =
1707 pool_elt_at_index (nm->signal_timed_event_data_pool,
1708 di);
1709 vlib_node_t *n =
1710 vlib_get_node (vm, te->process_node_index);
1711 vlib_process_t *p =
1712 vec_elt (nm->processes, n->runtime_index);
1713 void *data;
1714 data =
1715 vlib_process_signal_event_helper (nm, n, p,
1716 te->event_type_index,
1717 te->n_data_elts,
1718 te->n_data_elt_bytes);
1719 if (te->n_data_bytes < sizeof (te->inline_event_data))
Dave Barach178cf492018-11-13 16:34:13 -05001720 clib_memcpy_fast (data, te->inline_event_data,
1721 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001722 else
1723 {
Dave Barach178cf492018-11-13 16:34:13 -05001724 clib_memcpy_fast (data, te->event_data_as_vector,
1725 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001726 vec_free (te->event_data_as_vector);
1727 }
1728 pool_put (nm->signal_timed_event_data_pool, te);
1729 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001730 else
1731 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001732 cpu_time_now = clib_cpu_time_now ();
1733 cpu_time_now =
1734 dispatch_suspended_process (vm, di, cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001735 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001736 }
Damjan Marione9d52d52017-03-09 15:42:26 +01001737 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
1738 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001739 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001740 vlib_increment_main_loop_counter (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001741 /* Record time stamp in case there are no enabled nodes and above
Dave Barach9b8ffd92016-07-08 08:13:45 -04001742 calls do not update time stamp. */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001743 cpu_time_now = clib_cpu_time_now ();
Dave Barach000a0292020-02-17 17:07:12 -05001744 vm->loops_this_reporting_interval++;
1745 now = clib_time_now_internal (&vm->clib_time, cpu_time_now);
1746 /* Time to update loops_per_second? */
1747 if (PREDICT_FALSE (now >= vm->loop_interval_end))
1748 {
1749 /* Next sample ends in 20ms */
1750 if (vm->loop_interval_start)
1751 {
1752 f64 this_loops_per_second;
1753
1754 this_loops_per_second =
1755 ((f64) vm->loops_this_reporting_interval) / (now -
1756 vm->loop_interval_start);
1757
1758 vm->loops_per_second =
1759 vm->loops_per_second * vm->damping_constant +
1760 (1.0 - vm->damping_constant) * this_loops_per_second;
1761 if (vm->loops_per_second != 0.0)
1762 vm->seconds_per_loop = 1.0 / vm->loops_per_second;
1763 else
1764 vm->seconds_per_loop = 0.0;
1765 }
1766 /* New interval starts now, and ends in 20ms */
1767 vm->loop_interval_start = now;
1768 vm->loop_interval_end = now + 2e-4;
1769 vm->loops_this_reporting_interval = 0;
1770 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001771 }
1772}
Dave Barach9b8ffd92016-07-08 08:13:45 -04001773
Damjan Marione9d52d52017-03-09 15:42:26 +01001774static void
1775vlib_main_loop (vlib_main_t * vm)
1776{
1777 vlib_main_or_worker_loop (vm, /* is_main */ 1);
1778}
1779
1780void
1781vlib_worker_loop (vlib_main_t * vm)
1782{
1783 vlib_main_or_worker_loop (vm, /* is_main */ 0);
1784}
1785
Damjan Marionfd8deb42021-03-06 12:26:28 +01001786vlib_global_main_t vlib_global_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001787
Damjan Marion25ab6c52021-03-05 14:41:25 +01001788void
1789vlib_add_del_post_mortem_callback (void *cb, int is_add)
1790{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001791 vlib_global_main_t *vgm = vlib_get_global_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001792 int i;
1793
1794 if (is_add == 0)
1795 {
Damjan Marionfd8deb42021-03-06 12:26:28 +01001796 for (i = vec_len (vgm->post_mortem_callbacks) - 1; i >= 0; i--)
1797 if (vgm->post_mortem_callbacks[i] == cb)
1798 vec_del1 (vgm->post_mortem_callbacks, i);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001799 return;
1800 }
1801
Damjan Marionfd8deb42021-03-06 12:26:28 +01001802 for (i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
1803 if (vgm->post_mortem_callbacks[i] == cb)
Damjan Marion25ab6c52021-03-05 14:41:25 +01001804 return;
Damjan Marionfd8deb42021-03-06 12:26:28 +01001805 vec_add1 (vgm->post_mortem_callbacks, cb);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001806}
1807
1808static void
1809elog_post_mortem_dump (void)
1810{
Damjan Marionf553a2c2021-03-26 13:45:37 +01001811 elog_main_t *em = vlib_get_elog_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001812
1813 u8 *filename;
1814 clib_error_t *error;
1815
1816 filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
1817 error = elog_write_file (em, (char *) filename, 1 /* flush ring */);
1818 if (error)
1819 clib_error_report (error);
1820 /*
1821 * We're in the middle of crashing. Don't try to free the filename.
1822 */
1823}
1824
Ed Warnickecb9cada2015-12-08 15:45:58 -07001825static clib_error_t *
1826vlib_main_configure (vlib_main_t * vm, unformat_input_t * input)
1827{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001828 vlib_global_main_t *vgm = vlib_get_global_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001829 int turn_on_mem_trace = 0;
1830
1831 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1832 {
1833 if (unformat (input, "memory-trace"))
1834 turn_on_mem_trace = 1;
1835
1836 else if (unformat (input, "elog-events %d",
Damjan Marionfd8deb42021-03-06 12:26:28 +01001837 &vgm->configured_elog_ring_size))
1838 vgm->configured_elog_ring_size =
1839 1 << max_log2 (vgm->configured_elog_ring_size);
Dave Barach81481312017-05-16 09:08:14 -04001840 else if (unformat (input, "elog-post-mortem-dump"))
Damjan Marion25ab6c52021-03-05 14:41:25 +01001841 vlib_add_del_post_mortem_callback (elog_post_mortem_dump,
1842 /* is_add */ 1);
Dave Barachc74b43c2020-04-09 17:24:07 -04001843 else if (unformat (input, "buffer-alloc-success-rate %f",
1844 &vm->buffer_alloc_success_rate))
1845 {
1846 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1847 return clib_error_return
1848 (0, "Buffer fault injection not configured");
1849 }
1850 else if (unformat (input, "buffer-alloc-success-seed %u",
1851 &vm->buffer_alloc_success_seed))
1852 {
1853 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1854 return clib_error_return
1855 (0, "Buffer fault injection not configured");
1856 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001857 else
1858 return unformat_parse_error (input);
1859 }
1860
1861 unformat_free (input);
1862
1863 /* Enable memory trace as early as possible. */
1864 if (turn_on_mem_trace)
1865 clib_mem_trace (1);
1866
1867 return 0;
1868}
1869
1870VLIB_EARLY_CONFIG_FUNCTION (vlib_main_configure, "vlib");
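/*
 * The parameters parsed above belong to the "vlib" section of the startup
 * configuration (section name per the registration just above).  A hedged
 * example -- keyword spellings come from the unformat calls, while the file
 * name and surrounding syntax follow the usual startup.conf conventions:
 *
 *   vlib {
 *     memory-trace
 *     elog-events 1000000        # rounded up to a power of two
 *     elog-post-mortem-dump
 *   }
 */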
1871
Dave Barach9b8ffd92016-07-08 08:13:45 -04001872static void
Dave Barach11fb09e2020-08-06 12:10:09 -04001873placeholder_queue_signal_callback (vlib_main_t * vm)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001874{
1875}
Dave Barach16c75df2016-05-31 14:05:46 -04001876
Dave Barach1f806582018-06-14 09:18:21 -04001877#define foreach_weak_reference_stub \
1878_(vlib_map_stat_segment_init) \
1879_(vpe_api_init) \
1880_(vlibmemory_init) \
1881_(map_api_segment_init)
1882
1883#define _(name) \
1884clib_error_t *name (vlib_main_t *vm) __attribute__((weak)); \
1885clib_error_t *name (vlib_main_t *vm) { return 0; }
1886foreach_weak_reference_stub;
1887#undef _
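/*
 * The X-macro above expands to one weak, do-nothing definition per listed
 * name; for the first entry it yields
 *
 *   clib_error_t *vlib_map_stat_segment_init (vlib_main_t *vm)
 *     __attribute__ ((weak));
 *   clib_error_t *vlib_map_stat_segment_init (vlib_main_t *vm) { return 0; }
 *
 * so vlib links stand-alone, while images that also link the real providers
 * get the strong definitions instead.
 */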
1888
Dave Barachb09f4d02019-07-15 16:00:03 -04001889void vl_api_set_elog_main (elog_main_t * m) __attribute__ ((weak));
1890void
1891vl_api_set_elog_main (elog_main_t * m)
1892{
1893 clib_warning ("STUB");
1894}
1895
1896int vl_api_set_elog_trace_api_messages (int enable) __attribute__ ((weak));
1897int
1898vl_api_set_elog_trace_api_messages (int enable)
1899{
1900 clib_warning ("STUB");
1901 return 0;
1902}
1903
1904int vl_api_get_elog_trace_api_messages (void) __attribute__ ((weak));
1905int
1906vl_api_get_elog_trace_api_messages (void)
1907{
1908 clib_warning ("STUB");
1909 return 0;
1910}
1911
Ed Warnickecb9cada2015-12-08 15:45:58 -07001912/* Main function. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001913int
Eyal Barid334a6b2016-09-19 10:23:39 +03001914vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001915{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001916 vlib_global_main_t *vgm = vlib_get_global_main ();
Eyal Barid334a6b2016-09-19 10:23:39 +03001917 clib_error_t *volatile error;
Dave Barach5c20a012017-06-13 08:48:31 -04001918 vlib_node_main_t *nm = &vm->node_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001919
Dave Barach11fb09e2020-08-06 12:10:09 -04001920 vm->queue_signal_callback = placeholder_queue_signal_callback;
Dave Barach16c75df2016-05-31 14:05:46 -04001921
Dave Barachbc867c32020-11-25 10:07:09 -05001922 /* Reconfigure event log which is enabled very early */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001923 if (vgm->configured_elog_ring_size &&
1924 vgm->configured_elog_ring_size != vgm->elog_main.event_ring_size)
1925 elog_resize (&vgm->elog_main, vgm->configured_elog_ring_size);
Damjan Marionf553a2c2021-03-26 13:45:37 +01001926 vl_api_set_elog_main (vlib_get_elog_main ());
Dave Barachb09f4d02019-07-15 16:00:03 -04001927 (void) vl_api_set_elog_trace_api_messages (1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001928
1929 /* Default name. */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001930 if (!vgm->name)
1931 vgm->name = "VLIB";
Ed Warnickecb9cada2015-12-08 15:45:58 -07001932
Damjan Marion68b4da62018-09-30 18:26:20 +02001933 if ((error = vlib_physmem_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001934 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001935 clib_error_report (error);
1936 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001937 }
Damjan Marion49d66f12017-07-20 18:10:35 +02001938
Filip Tehlard2bbdef2019-02-22 05:05:53 -08001939 if ((error = vlib_map_stat_segment_init (vm)))
1940 {
1941 clib_error_report (error);
1942 goto done;
1943 }
1944
Damjan Marion49d66f12017-07-20 18:10:35 +02001945 if ((error = vlib_buffer_main_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001946 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001947 clib_error_report (error);
1948 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001949 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001950
1951 if ((error = vlib_thread_init (vm)))
1952 {
1953 clib_error_report (error);
1954 goto done;
1955 }
1956
Damjan Mariona31698b2021-03-10 14:35:28 +01001957 /* Register node function march variants */
1958 vlib_register_all_node_march_variants (vm);
1959
Ed Warnickecb9cada2015-12-08 15:45:58 -07001960 /* Register static nodes so that init functions may use them. */
1961 vlib_register_all_static_nodes (vm);
1962
1963 /* Set seed for random number generator.
1964 Allow user to specify seed to make random sequence deterministic. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001965 if (!unformat (input, "seed %wd", &vm->random_seed))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001966 vm->random_seed = clib_cpu_time_now ();
1967 clib_random_buffer_init (&vm->random_buffer, vm->random_seed);
1968
Ed Warnickecb9cada2015-12-08 15:45:58 -07001969 /* Initialize node graph. */
1970 if ((error = vlib_node_main_init (vm)))
1971 {
1972 /* Arrange for graph hook up error to not be fatal when debugging. */
1973 if (CLIB_DEBUG > 0)
1974 clib_error_report (error);
1975 else
1976 goto done;
1977 }
1978
Dave Barach1f806582018-06-14 09:18:21 -04001979 /* Direct call / weak reference, for vlib standalone use-cases */
1980 if ((error = vpe_api_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001981 {
1982 clib_error_report (error);
1983 goto done;
1984 }
1985
Dave Barach1f806582018-06-14 09:18:21 -04001986 if ((error = vlibmemory_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001987 {
1988 clib_error_report (error);
1989 goto done;
1990 }
1991
Dave Barach1f806582018-06-14 09:18:21 -04001992 if ((error = map_api_segment_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001993 {
1994 clib_error_report (error);
1995 goto done;
1996 }
1997
Ole Troan964f93e2016-06-10 13:22:36 +02001998 /* See unix/main.c; most likely already set up */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001999 if (vgm->init_functions_called == 0)
2000 vgm->init_functions_called = hash_create (0, /* value bytes */ 0);
Ole Troan964f93e2016-06-10 13:22:36 +02002001 if ((error = vlib_call_all_init_functions (vm)))
2002 goto done;
2003
Dave Barach5c20a012017-06-13 08:48:31 -04002004 nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
2005 CLIB_CACHE_LINE_BYTES);
2006
2007 vec_validate (nm->data_from_advancing_timing_wheel, 10);
2008 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
2009
2010 /* Create the process timing wheel */
2011 TW (tw_timer_wheel_init) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
2012 0 /* no callback */ ,
2013 10e-6 /* timer period 10us */ ,
2014 ~0 /* max expirations per call */ );
2015
Dave Barach2877eee2017-12-15 12:22:57 -05002016 vec_validate (vm->pending_rpc_requests, 0);
2017 _vec_len (vm->pending_rpc_requests) = 0;
Dave Barachf6c68d72018-11-01 08:12:52 -04002018 vec_validate (vm->processing_rpc_requests, 0);
2019 _vec_len (vm->processing_rpc_requests) = 0;
Dave Barach2877eee2017-12-15 12:22:57 -05002020
Dave Barachc74b43c2020-04-09 17:24:07 -04002021 /* Default params for the buffer allocator fault injector, if configured */
2022 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
2023 {
2024 vm->buffer_alloc_success_seed = 0xdeaddabe;
2025 vm->buffer_alloc_success_rate = 0.80;
2026 }
2027
Dave Barachd1e17d02019-03-21 18:01:48 -04002028 if ((error = vlib_call_all_config_functions (vm, input, 0 /* is_early */ )))
2029 goto done;
2030
Dave Barach000a0292020-02-17 17:07:12 -05002031 /*
2032 * Use exponential smoothing, with a half-life of 1 second
2033 * reported_rate(t) = reported_rate(t-1) * K + rate(t)*(1-K)
2034 *
2035 * Sample every 20ms, aka 50 samples per second
2036 * K = exp (-1.0/20.0);
2037 * K = 0.95
2038 */
2039 vm->damping_constant = exp (-1.0 / 20.0);
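  /*
   * Worked example (illustrative numbers only): with K ~= 0.95, a step in
   * the instantaneous rate from 1e6 to 2e6 loops/sec moves the reported
   * rate to 1e6 * 0.95 + 2e6 * 0.05 = 1.05e6 after one 20 ms sample, and
   * to 2e6 - 1e6 * 0.95^n after n samples, i.e. most of the way to the new
   * rate within a few dozen samples.
   */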
2040
Dave Barachc602b382019-06-03 19:48:22 -04002041 /* Sort per-thread init functions before we start threads */
Damjan Marionfd8deb42021-03-06 12:26:28 +01002042 vlib_sort_init_exit_functions (&vgm->worker_init_function_registrations);
Dave Barachc602b382019-06-03 19:48:22 -04002043
Dave Barachd1e17d02019-03-21 18:01:48 -04002044 /* Call all main loop enter functions. */
2045 {
2046 clib_error_t *sub_error;
2047 sub_error = vlib_call_all_main_loop_enter_functions (vm);
2048 if (sub_error)
2049 clib_error_report (sub_error);
2050 }
2051
Ed Warnickecb9cada2015-12-08 15:45:58 -07002052 switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
2053 {
2054 case VLIB_MAIN_LOOP_EXIT_NONE:
2055 vm->main_loop_exit_set = 1;
2056 break;
2057
2058 case VLIB_MAIN_LOOP_EXIT_CLI:
2059 goto done;
2060
2061 default:
2062 error = vm->main_loop_error;
2063 goto done;
2064 }
2065
Ed Warnickecb9cada2015-12-08 15:45:58 -07002066 vlib_main_loop (vm);
2067
Dave Barach9b8ffd92016-07-08 08:13:45 -04002068done:
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302069 vlib_worker_thread_barrier_sync (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002070 /* Call all exit functions. */
2071 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04002072 clib_error_t *sub_error;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002073 sub_error = vlib_call_all_main_loop_exit_functions (vm);
2074 if (sub_error)
2075 clib_error_report (sub_error);
2076 }
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302077 vlib_worker_thread_barrier_release (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002078
2079 if (error)
2080 clib_error_report (error);
2081
Pierre Pfisterc26cc722021-09-10 16:38:03 +02002082 return vm->main_loop_exit_status;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002083}
Dave Barach9b8ffd92016-07-08 08:13:45 -04002084
Dave Barachab1a50c2020-10-06 14:08:16 -04002085vlib_main_t *
2086vlib_get_main_not_inline (void)
2087{
2088 return vlib_get_main ();
2089}
2090
2091elog_main_t *
2092vlib_get_elog_main_not_inline ()
2093{
2094 return &vlib_global_main.elog_main;
2095}
2096
Pierre Pfisterc26cc722021-09-10 16:38:03 +02002097void
2098vlib_exit_with_status (vlib_main_t *vm, int status)
2099{
2100 vm->main_loop_exit_status = status;
2101 __atomic_store_n (&vm->main_loop_exit_now, 1, __ATOMIC_RELEASE);
2102}
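/*
 * Typical use (sketch): a CLI handler, API handler or signal handler that
 * wants an orderly shutdown with a specific process exit code calls
 *
 *   vlib_exit_with_status (vlib_get_main (), 1);
 *
 * The code that watches main_loop_exit_now then stops the main loop (where
 * that flag is checked is an assumption here, it is not in the functions
 * above), and vlib_main () returns main_loop_exit_status.
 */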
2103
Dave Barach9b8ffd92016-07-08 08:13:45 -04002104/*
2105 * fd.io coding-style-patch-verification: ON
2106 *
2107 * Local Variables:
2108 * eval: (c-set-style "gnu")
2109 * End:
2110 */