/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>

#include <vlib/unix/unix.h>

/* Actually allocate a few extra slots of vector data to support
   speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4)

always_inline u32
vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes)
{
  u32 n_bytes;

  /* Make room for vlib_frame_t plus scalar arguments. */
  n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes);

  /* Make room for vector arguments.
     Allocate a few extra slots of vector data to support
     speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_EXTRA 4
  n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes;

  /* Magic number is first 32bit number after vector data.
     Used to make sure that vector data is never overrun. */
#define VLIB_FRAME_MAGIC (0xabadc0ed)
  n_bytes += sizeof (u32);

  /* Pad to cache line. */
  n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES);

  return n_bytes;
}
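
/*
 * Resulting frame layout, as a sketch derived from the sizes computed
 * above (descriptive only, not an authoritative memory map):
 *
 *   vlib_frame_t header + scalar arguments
 *   (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) vector elements
 *   u32 VLIB_FRAME_MAGIC guard word (checked by validate_frame_magic)
 *   padding up to the next cache line
 */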

always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  void *p = f;

  p += vlib_frame_vector_byte_offset (node->scalar_size);

  p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size;

  return p;
}

static inline vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
		     u32 n_scalar_bytes, u32 n_vector_bytes)
{
#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
  uword key = (n_scalar_bytes << 16) | n_vector_bytes;
  uword *p, i;

  p = hash_get (nm->frame_size_hash, key);
  if (p)
    i = p[0];
  else
    {
      i = vec_len (nm->frame_sizes);
      vec_validate (nm->frame_sizes, i);
      hash_set (nm->frame_size_hash, key, i);
    }

  return vec_elt_at_index (nm->frame_sizes, i);
#else
  ASSERT (vlib_frame_bytes (n_scalar_bytes, n_vector_bytes)
	  == (vlib_frame_bytes (0, 4)));
  return vec_elt_at_index (nm->frame_sizes, 0);
#endif
}

static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
			  u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n, scalar_size, vector_size;

  ASSERT (vm == vlib_get_main ());

  to_node = vlib_get_node (vm, to_node_index);

  scalar_size = to_node->scalar_size;
  vector_size = to_node->vector_size;

  fs = get_frame_size_info (nm, scalar_size, vector_size);
  n = vlib_frame_bytes (scalar_size, vector_size);
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      _vec_len (fs->free_frames) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_size = scalar_size;
  f->vector_size = vector_size;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}
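
/*
 * Recycling note (descriptive only): vlib_frame_free below does not return
 * frames to the heap; it pushes them onto the per-size free_frames vector,
 * and the allocator above pops from that vector before falling back to
 * clib_mem_alloc_aligned_no_fail, so steady-state dispatch should avoid
 * per-frame heap allocation.
 */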

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns the frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
		  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
				   /* frame_flags */ 0);
}

vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
					      /* frame_flags */
					      VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

static inline void
vlib_validate_frame_indices (vlib_frame_t * f)
{
  if (CLIB_DEBUG > 0)
    {
      int i;
      u32 *from = vlib_frame_vector_args (f);

      /* Check for bad buffer index values */
      for (i = 0; i < f->n_vectors; i++)
	{
	  if (from[i] == 0)
	    {
	      clib_warning ("BUG: buffer index 0 at index %d", i);
	      ASSERT (0);
	    }
	  else if (from[i] == 0xfefefefe)
	    {
	      clib_warning ("BUG: frame poison pattern at index %d", i);
	      ASSERT (0);
	    }
	}
    }
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  ASSERT (vm == vlib_get_main ());

  vlib_validate_frame_indices (f);

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}
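
/*
 * Typical usage sketch for the two calls above (illustrative only; the
 * node index "to_node_index" and buffer index "bi" are placeholders):
 *
 *   vlib_frame_t *f = vlib_get_frame_to_node (vm, to_node_index);
 *   u32 *to_next = vlib_frame_vector_args (f);
 *   to_next[0] = bi;
 *   f->n_vectors = 1;
 *   vlib_put_frame_to_node (vm, to_node_index, f);
 *
 * Because the frame was allocated with VLIB_FRAME_FREE_AFTER_DISPATCH,
 * it is returned to the free list once the target node has dispatched it.
 */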

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (vm == vlib_get_main ());
  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
  {
    u32 n_alloc = fs->n_alloc_frames;
    u32 n_free = vec_len (fs->free_frames);

    if (n_alloc + n_free > 0)
      vlib_cli_output (vm, "%=6d%=12d%=12d",
		       fs - nm->frame_sizes, n_alloc, n_free);
  }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
				  vlib_node_runtime_t * node_runtime,
				  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
	  || node->type == VLIB_NODE_TYPE_INPUT
	  || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
	vlib_node_get_next_frame (vm,
				  next_node->owner_node_index,
				  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
	{
	  vlib_pending_frame_t *p;
	  if (next_frame->frame != NULL)
	    {
	      vec_foreach (p, nm->pending_frames)
	      {
		if (p->frame == next_frame->frame)
		  {
		    p->next_frame_index =
		      next_frame - vm->node_main.next_frames;
		  }
	      }
	    }
	}
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}
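
/*
 * Ownership note (descriptive only): a destination node records a single
 * (owner_node_index, owner_next_index) pair, so at most one arc holds
 * VLIB_FRAME_OWNER for it at a time. When a different sender wants to
 * enqueue, the swap above transfers the in-progress next-frame state to
 * the new owner so partially filled frames and pending-frame bookkeeping
 * stay consistent.
 */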

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
		      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
			      vlib_node_runtime_t * node,
			      u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
	 two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
	{
	  vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
	  f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
	}

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
			    vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
			      vlib_node_runtime_t * rt,
			      u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);

  vlib_validate_frame_indices (f);

  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
			      nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
	{
	  clib_warning ("%v", msg);
	  ASSERT (0);
	}
      vec_free (msg);
    }
}

void
vlib_put_next_frame (vlib_main_t * vm,
		     vlib_node_runtime_t * r,
		     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there. Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
	{
	  __attribute__ ((unused)) vlib_node_t *node;
	  vlib_node_t *next_node;
	  vlib_node_runtime_t *next_runtime;

	  node = vlib_get_node (vm, r->node_index);
	  next_node = vlib_get_next_node (vm, r->node_index, next_index);
	  next_runtime = vlib_node_get_runtime (vm, next_node->index);

	  vec_add2 (nm->pending_frames, p, 1);

	  p->frame = nf->frame;
	  p->node_runtime_index = nf->node_runtime_index;
	  p->next_frame_index = nf - nm->next_frames;
	  nf->flags |= VLIB_FRAME_PENDING;
	  f->frame_flags |= VLIB_FRAME_PENDING;

	  /*
	   * If we're going to dispatch this frame on another thread,
	   * force allocation of a new frame. Otherwise, we create
	   * a dangling frame reference. Each thread has its own copy of
	   * the next_frames vector.
	   */
	  if (0 && r->thread_index != next_runtime->thread_index)
	    {
	      nf->frame = NULL;
	      nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
	    }
	}

      /* Copy trace flag from next_frame and from runtime. */
      nf->flags |=
	(nf->flags & VLIB_NODE_FLAG_TRACE) | (r->flags & VLIB_NODE_FLAG_TRACE);

      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;
      if (PREDICT_FALSE (v1 < v0))
	{
	  vlib_node_t *node = vlib_get_node (vm, r->node_index);
	  vec_elt (node->n_vectors_by_next_node, next_index) += v0;
	}
    }
}
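
/*
 * Illustrative enqueue pattern built on the entry points above, as used
 * from a node dispatch function (a sketch; "next_index", "bi" and the loop
 * bookkeeping are placeholders, not code from this file):
 *
 *   u32 *to_next, n_left_to_next;
 *   vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 *   while (n_left_from > 0 && n_left_to_next > 0)
 *     {
 *       to_next[0] = bi;       // enqueue one buffer index
 *       to_next += 1;
 *       n_left_to_next -= 1;
 *       ...
 *     }
 *   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 */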

/* Sync up runtime (32 bit counters) and main node stats (64 bit counters). */
void
vlib_node_runtime_sync_stats_node (vlib_node_t *n, vlib_node_runtime_t *r,
				   uword n_calls, uword n_vectors,
				   uword n_clocks)
{
  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

void
vlib_node_runtime_sync_stats (vlib_main_t *vm, vlib_node_runtime_t *r,
			      uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);
  vlib_node_runtime_sync_stats_node (n, r, n_calls, n_vectors, n_clocks);
}

always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
			 vlib_process_t * p,
			 uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != vlib_get_first_main ())
	return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt =
      vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
			n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure. */
  {
    vlib_next_frame_t *nf;
    uword i;
    for (i = 0; i < rt->n_next_nodes; i++)
      {
	nf = vlib_node_runtime_get_next_frame (vm, rt, i);
	vec_elt (n->n_vectors_by_next_node, i) +=
	  nf->vectors_since_last_overflow;
	nf->vectors_since_last_overflow = 0;
      }
  }
}

always_inline u32
vlib_node_runtime_update_stats (vlib_main_t * vm,
				vlib_node_runtime_t * node,
				uword n_calls,
				uword n_vectors, uword n_clocks)
{
  u32 ca0, ca1, v0, v1, cl0, cl1, r;

  cl0 = cl1 = node->clocks_since_last_overflow;
  ca0 = ca1 = node->calls_since_last_overflow;
  v0 = v1 = node->vectors_since_last_overflow;

  ca1 = ca0 + n_calls;
  v1 = v0 + n_vectors;
  cl1 = cl0 + n_clocks;

  node->calls_since_last_overflow = ca1;
  node->clocks_since_last_overflow = cl1;
  node->vectors_since_last_overflow = v1;

  node->max_clock_n = node->max_clock > n_clocks ?
    node->max_clock_n : n_vectors;
  node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks;

  r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);

  if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
    {
      node->calls_since_last_overflow = ca0;
      node->clocks_since_last_overflow = cl0;
      node->vectors_since_last_overflow = v0;

      vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
    }

  return r;
}
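
/*
 * Overflow handling note (descriptive only): the per-runtime counters are
 * 32 bits wide for cache compactness. When an addition above wraps (the new
 * value compares less than the old one), the pre-update values are restored
 * and the deltas are folded directly into the 64-bit stats_total fields via
 * vlib_node_runtime_sync_stats.
 */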

always_inline void
vlib_process_update_stats (vlib_main_t * vm,
			   vlib_process_t * p,
			   uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_update_stats (vm, &p->node_runtime,
				  n_calls, n_vectors, n_clocks);
}

static clib_error_t *
vlib_cli_elog_clear (vlib_main_t * vm,
		     unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_reset_buffer (&vlib_global_main.elog_main);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_clear_cli, static) = {
  .path = "event-logger clear",
  .short_help = "Clear the event log",
  .function = vlib_cli_elog_clear,
};
/* *INDENT-ON* */

#ifdef CLIB_UNIX
static clib_error_t *
elog_save_buffer (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  char *file, *chroot_file;
  clib_error_t *error = 0;

  if (!unformat (input, "%s", &file))
    {
      vlib_cli_output (vm, "expected file name, got `%U'",
		       format_unformat_error, input);
      return 0;
    }

  /* It's fairly hard to get "../oopsie" through unformat; just in case */
  if (strstr (file, "..") || index (file, '/'))
    {
      vlib_cli_output (vm, "illegal characters in filename '%s'", file);
      return 0;
    }

  chroot_file = (char *) format (0, "/tmp/%s%c", file, 0);

  vec_free (file);

  vlib_cli_output (vm, "Saving %wd of %wd events to %s",
		   elog_n_events_in_buffer (em),
		   elog_buffer_capacity (em), chroot_file);

  vlib_worker_thread_barrier_sync (vm);
  error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
  vlib_worker_thread_barrier_release (vm);
  vec_free (chroot_file);
  return error;
}

void
vlib_post_mortem_dump (void)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();

  for (int i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
    (vgm->post_mortem_callbacks[i]) ();
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
  .path = "event-logger save",
  .short_help = "event-logger save <filename> (saves log in /tmp/<filename>)",
  .function = elog_save_buffer,
};
/* *INDENT-ON* */

static clib_error_t *
elog_stop (vlib_main_t * vm,
	   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = em->n_total_events;

  vlib_cli_output (vm, "Stopped the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_stop_cli, static) = {
  .path = "event-logger stop",
  .short_help = "Stop the event-logger",
  .function = elog_stop,
};
/* *INDENT-ON* */

static clib_error_t *
elog_restart (vlib_main_t * vm,
	      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = ~0;

  vlib_cli_output (vm, "Restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_restart_cli, static) = {
  .path = "event-logger restart",
  .short_help = "Restart the event-logger",
  .function = elog_restart,
};
/* *INDENT-ON* */

static clib_error_t *
elog_resize_command_fn (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  u32 tmp;

  /* Stop the parade */
  elog_reset_buffer (em);

  if (unformat (input, "%d", &tmp))
    {
      elog_alloc (em, tmp);
      em->n_total_events_disable_limit = ~0;
    }
  else
    return clib_error_return (0, "Must specify how many events in the ring");

  vlib_cli_output (vm, "Resized ring and restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
  .path = "event-logger resize",
  .short_help = "event-logger resize <nnn>",
  .function = elog_resize_command_fn,
};
/* *INDENT-ON* */

#endif /* CLIB_UNIX */

static void
elog_show_buffer_internal (vlib_main_t * vm, u32 n_events_to_show)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  elog_event_t *e, *es;
  f64 dt;

  /* Show events in VLIB time since log clock starts after VLIB clock. */
  dt = (em->init_time.cpu - vm->clib_time.init_cpu_time)
    * vm->clib_time.seconds_per_clock;

  es = elog_peek_events (em);
  vlib_cli_output (vm, "%d of %d events in buffer, logger %s", vec_len (es),
		   em->event_ring_size,
		   em->n_total_events < em->n_total_events_disable_limit ?
		   "running" : "stopped");
  vec_foreach (e, es)
  {
    vlib_cli_output (vm, "%18.9f: %U",
		     e->time + dt, format_elog_event, em, e);
    n_events_to_show--;
    if (n_events_to_show == 0)
      break;
  }
  vec_free (es);
}

static clib_error_t *
elog_show_buffer (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 n_events_to_show;
  clib_error_t *error = 0;

  n_events_to_show = 250;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%d", &n_events_to_show))
	;
      else if (unformat (input, "all"))
	n_events_to_show = ~0;
      else
	return unformat_parse_error (input);
    }
  elog_show_buffer_internal (vm, n_events_to_show);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_show_cli, static) = {
  .path = "show event-logger",
  .short_help = "Show event logger info",
  .function = elog_show_buffer,
};
/* *INDENT-ON* */

void
vlib_gdb_show_event_log (void)
{
  elog_show_buffer_internal (vlib_get_main (), (u32) ~ 0);
}

static inline void
vlib_elog_main_loop_event (vlib_main_t * vm,
			   u32 node_index,
			   u64 time, u32 n_vectors, u32 is_return)
{
  vlib_main_t *evm = vlib_get_first_main ();
  elog_main_t *em = vlib_get_elog_main ();
  int enabled = evm->elog_trace_graph_dispatch |
    evm->elog_trace_graph_circuit;

  if (PREDICT_FALSE (enabled && n_vectors))
    {
      if (PREDICT_FALSE (!elog_is_enabled (em)))
	{
	  evm->elog_trace_graph_dispatch = 0;
	  evm->elog_trace_graph_circuit = 0;
	  return;
	}
      if (PREDICT_TRUE
	  (evm->elog_trace_graph_dispatch ||
	   (evm->elog_trace_graph_circuit &&
	    node_index == evm->elog_trace_graph_circuit_node_index)))
	{
	  elog_track (em,
		      /* event type */
		      vec_elt_at_index (is_return
					? evm->node_return_elog_event_types
					: evm->node_call_elog_event_types,
					node_index),
		      /* track */
		      (vm->thread_index ?
		       &vlib_worker_threads[vm->thread_index].elog_track
		       : &em->default_track),
		      /* data to log */ n_vectors);
	}
    }
}

#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
void (*vlib_buffer_trace_trajectory_cb) (vlib_buffer_t * b, u32 node_index);
void (*vlib_buffer_trace_trajectory_init_cb) (vlib_buffer_t * b);

void
vlib_buffer_trace_trajectory_init (vlib_buffer_t * b)
{
  if (PREDICT_TRUE (vlib_buffer_trace_trajectory_init_cb != 0))
    {
      (*vlib_buffer_trace_trajectory_init_cb) (b);
    }
}

#endif

static inline void
add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
{
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
  if (PREDICT_TRUE (vlib_buffer_trace_trajectory_cb != 0))
    {
      (*vlib_buffer_trace_trajectory_cb) (b, node_index);
    }
#endif
}

static_always_inline u64
dispatch_node (vlib_main_t * vm,
	       vlib_node_runtime_t * node,
	       vlib_node_type_t type,
	       vlib_node_state_t dispatch_state,
	       vlib_frame_t * frame, u64 last_time_stamp)
{
  uword n, v;
  u64 t;
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;

  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *n = vlib_get_node (vm, node->node_index);
      ASSERT (n->type == type);
    }

  /* Only non-internal nodes may be disabled. */
  if (type != VLIB_NODE_TYPE_INTERNAL && node->state != dispatch_state)
    {
      ASSERT (type != VLIB_NODE_TYPE_INTERNAL);
      return last_time_stamp;
    }

  if ((type == VLIB_NODE_TYPE_PRE_INPUT || type == VLIB_NODE_TYPE_INPUT)
      && dispatch_state != VLIB_NODE_STATE_INTERRUPT)
    {
      u32 c = node->input_main_loops_per_call;
      /* Only call node when count reaches zero. */
      if (c)
	{
	  node->input_main_loops_per_call = c - 1;
	  return last_time_stamp;
	}
    }

  /* Speculatively prefetch next frames. */
  if (node->n_next_nodes > 0)
    {
      nf = vec_elt_at_index (nm->next_frames, node->next_frame_index);
      CLIB_PREFETCH (nf, 4 * sizeof (nf[0]), WRITE);
    }

  vm->cpu_time_last_node_dispatch = last_time_stamp;

  vlib_elog_main_loop_event (vm, node->node_index,
			     last_time_stamp, frame ? frame->n_vectors : 0,
			     /* is_after */ 0);

  vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
				  VLIB_NODE_RUNTIME_PERF_BEFORE);

  /*
   * Turn this on if you run into
   * "bad monkey" contexts, and you want to know exactly
   * which nodes they've visited... See ixge.c...
   */
  if (VLIB_BUFFER_TRACE_TRAJECTORY && frame)
    {
      int i;
      u32 *from;
      from = vlib_frame_vector_args (frame);
      for (i = 0; i < frame->n_vectors; i++)
	{
	  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
	  add_trajectory_trace (b, node->node_index);
	}
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
	n = node->function (vm, node, frame);
      else
	n = vm->dispatch_wrapper_fn (vm, node, frame);
    }
  else
    {
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
	n = node->function (vm, node, frame);
      else
	n = vm->dispatch_wrapper_fn (vm, node, frame);
    }

  t = clib_cpu_time_now ();

  vlib_node_runtime_perf_counter (vm, node, frame, n, t,
				  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_elog_main_loop_event (vm, node->node_index, t, n, 1 /* is_after */ );

  vm->main_loop_vectors_processed += n;
  vm->main_loop_nodes_processed += n > 0;

  v = vlib_node_runtime_update_stats (vm, node,
				      /* n_calls */ 1,
				      /* n_vectors */ n,
				      /* n_clocks */ t - last_time_stamp);

  /* When in adaptive mode and the vector rate crosses a threshold, switch
     to polling mode and vice versa. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE))
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (e) =
        {
          .function = (char *) __FUNCTION__,
          .format = "%s vector length %d, switching to %s",
          .format_args = "T4i4t4",
          .n_enum_strings = 2,
          .enum_strings = {
            "interrupt", "polling",
          },
        };
      /* *INDENT-ON* */
      struct
      {
	u32 node_name, vector_length, is_polling;
      } *ed;

      if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
	   && v >= nm->polling_threshold_vector_length) &&
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	{
	  vlib_node_t *n = vlib_get_node (vm, node->node_index);
	  n->state = VLIB_NODE_STATE_POLLING;
	  node->state = VLIB_NODE_STATE_POLLING;
	  node->flags &=
	    ~VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
	  node->flags |= VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
	  nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] -= 1;
	  nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] += 1;

	  if (PREDICT_FALSE (
		vlib_get_first_main ()->elog_trace_graph_dispatch))
	    {
	      vlib_worker_thread_t *w = vlib_worker_threads
		+ vm->thread_index;

	      ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
				    w->elog_track);
	      ed->node_name = n->name_elog_string;
	      ed->vector_length = v;
	      ed->is_polling = 1;
	    }
	}
      else if (dispatch_state == VLIB_NODE_STATE_POLLING
	       && v <= nm->interrupt_threshold_vector_length)
	{
	  vlib_node_t *n = vlib_get_node (vm, node->node_index);
	  if (node->flags &
	      VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
	    {
	      /* Switch to interrupt mode after dispatching in polling mode
		 one more time. This allows the driver to re-enable
		 interrupts. */
	      n->state = VLIB_NODE_STATE_INTERRUPT;
	      node->state = VLIB_NODE_STATE_INTERRUPT;
	      node->flags &=
		~VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
	      nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] -= 1;
	      nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] += 1;

	    }
	  else
	    {
	      vlib_worker_thread_t *w = vlib_worker_threads
		+ vm->thread_index;
	      node->flags |=
		VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
	      if (PREDICT_FALSE (
		    vlib_get_first_main ()->elog_trace_graph_dispatch))
		{
		  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
					w->elog_track);
		  ed->node_name = n->name_elog_string;
		  ed->vector_length = v;
		  ed->is_polling = 0;
		}
	    }
	}
    }

  return t;
}
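
/*
 * Adaptive-mode note (descriptive only): an input node flagged
 * VLIB_NODE_FLAG_ADAPTIVE_MODE is moved to polling when a dispatch handles
 * at least polling_threshold_vector_length vectors, and back to interrupt
 * mode once the per-dispatch vector count drops to
 * interrupt_threshold_vector_length or below. The SWITCH_FROM_* flags give
 * the driver one extra polling pass so it can re-enable its interrupts
 * before the mode change takes effect.
 */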
1145
Damjan Marion9a332e12017-03-28 15:11:20 +02001146static u64
Dave Baracha6269992017-06-07 08:18:49 -04001147dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
1148 u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001149{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001150 vlib_node_main_t *nm = &vm->node_main;
1151 vlib_frame_t *f;
Dave Barach11fb09e2020-08-06 12:10:09 -04001152 vlib_next_frame_t *nf, nf_placeholder;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001153 vlib_node_runtime_t *n;
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001154 vlib_frame_t *restore_frame;
Dave Baracha6269992017-06-07 08:18:49 -04001155 vlib_pending_frame_t *p;
1156
1157 /* See comment below about dangling references to nm->pending_frames */
1158 p = nm->pending_frames + pending_frame_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001159
1160 n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
1161 p->node_runtime_index);
1162
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001163 f = vlib_get_frame (vm, p->frame);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001164 if (p->next_frame_index == VLIB_PENDING_FRAME_NO_NEXT_FRAME)
1165 {
Dave Barach11fb09e2020-08-06 12:10:09 -04001166 /* No next frame: so use placeholder on stack. */
1167 nf = &nf_placeholder;
Damjan Marion633b6fd2018-09-14 14:38:53 +02001168 nf->flags = f->frame_flags & VLIB_NODE_FLAG_TRACE;
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001169 nf->frame = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001170 }
1171 else
1172 nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
1173
Damjan Marion633b6fd2018-09-14 14:38:53 +02001174 ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001175
1176 /* Force allocation of new frame while current frame is being
1177 dispatched. */
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001178 restore_frame = NULL;
1179 if (nf->frame == p->frame)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001180 {
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001181 nf->frame = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001182 nf->flags &= ~VLIB_FRAME_IS_ALLOCATED;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001183 if (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH))
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001184 restore_frame = p->frame;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001185 }
1186
1187 /* Frame must be pending. */
Damjan Marion633b6fd2018-09-14 14:38:53 +02001188 ASSERT (f->frame_flags & VLIB_FRAME_PENDING);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001189 ASSERT (f->n_vectors > 0);
1190
1191 /* Copy trace flag from next frame to node.
1192 Trace flag indicates that at least one vector in the dispatched
1193 frame is traced. */
1194 n->flags &= ~VLIB_NODE_FLAG_TRACE;
1195 n->flags |= (nf->flags & VLIB_FRAME_TRACE) ? VLIB_NODE_FLAG_TRACE : 0;
1196 nf->flags &= ~VLIB_FRAME_TRACE;
1197
1198 last_time_stamp = dispatch_node (vm, n,
1199 VLIB_NODE_TYPE_INTERNAL,
1200 VLIB_NODE_STATE_POLLING,
1201 f, last_time_stamp);
Dave Baracha8df85c2019-10-01 13:34:23 -04001202 /* Internal node vector-rate accounting, for summary stats */
1203 vm->internal_node_vectors += f->n_vectors;
1204 vm->internal_node_calls++;
1205 vm->internal_node_last_vectors_per_main_loop =
1206 (f->n_vectors > vm->internal_node_last_vectors_per_main_loop) ?
1207 f->n_vectors : vm->internal_node_last_vectors_per_main_loop;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001208
Damjan Marion296988d2019-02-21 20:24:54 +01001209 f->frame_flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_NO_APPEND);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001210
1211 /* Frame is ready to be used again, so restore it. */
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001212 if (restore_frame != NULL)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001213 {
Dave Baracha6269992017-06-07 08:18:49 -04001214 /*
1215 * We musn't restore a frame that is flagged to be freed. This
1216 * shouldn't happen since frames to be freed post dispatch are
1217 * those used when the to-node frame becomes full i.e. they form a
1218 * sort of queue of frames to a single node. If we get here then
1219 * the to-node frame and the pending frame *were* the same, and so
1220 * we removed the to-node frame. Therefore this frame is no
1221 * longer part of the queue for that node and hence it cannot be
1222 * it's overspill.
Neale Ranns88170612016-11-22 08:29:51 +00001223 */
Damjan Marion633b6fd2018-09-14 14:38:53 +02001224 ASSERT (!(f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH));
Neale Ranns88170612016-11-22 08:29:51 +00001225
Dave Baracha6269992017-06-07 08:18:49 -04001226 /*
1227 * NB: dispatching node n can result in the creation and scheduling
1228 * of new frames, and hence in the reallocation of nm->pending_frames.
1229 * Recompute p, or no supper. This was broken for more than 10 years.
1230 */
1231 p = nm->pending_frames + pending_frame_index;
1232
1233 /*
1234 * p->next_frame_index can change during node dispatch if node
1235 * function decides to change graph hook up.
1236 */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001237 nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001238 nf->flags |= VLIB_FRAME_IS_ALLOCATED;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001239
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001240 if (NULL == nf->frame)
Neale Ranns88170612016-11-22 08:29:51 +00001241 {
1242 /* no new frame has been assigned to this node, use the saved one */
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001243 nf->frame = restore_frame;
Neale Ranns88170612016-11-22 08:29:51 +00001244 f->n_vectors = 0;
1245 }
1246 else
1247 {
1248 /* The node has gained a frame, implying packets from the current frame
1249 were re-queued to this same node. we don't need the saved one
1250 anymore */
1251 vlib_frame_free (vm, n, f);
1252 }
1253 }
1254 else
Ed Warnickecb9cada2015-12-08 15:45:58 -07001255 {
Damjan Marion633b6fd2018-09-14 14:38:53 +02001256 if (f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH)
Neale Ranns88170612016-11-22 08:29:51 +00001257 {
1258 ASSERT (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH));
1259 vlib_frame_free (vm, n, f);
1260 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001261 }
1262
1263 return last_time_stamp;
1264}
1265
1266always_inline uword
1267vlib_process_stack_is_valid (vlib_process_t * p)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001268{
1269 return p->stack[0] == VLIB_PROCESS_STACK_MAGIC;
1270}
Ed Warnickecb9cada2015-12-08 15:45:58 -07001271
Dave Barach9b8ffd92016-07-08 08:13:45 -04001272typedef struct
1273{
1274 vlib_main_t *vm;
1275 vlib_process_t *process;
1276 vlib_frame_t *frame;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001277} vlib_process_bootstrap_args_t;
1278
1279/* Called in process stack. */
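/* Bootstrap trampoline, entered via clib_calljmp () on the process's own
   stack: run the process node function, verify the stack guard, then
   longjmp back to the dispatcher with the function's return value. */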
Dave Barach9b8ffd92016-07-08 08:13:45 -04001280static uword
1281vlib_process_bootstrap (uword _a)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001282{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001283 vlib_process_bootstrap_args_t *a;
1284 vlib_main_t *vm;
1285 vlib_node_runtime_t *node;
1286 vlib_frame_t *f;
1287 vlib_process_t *p;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001288 uword n;
1289
1290 a = uword_to_pointer (_a, vlib_process_bootstrap_args_t *);
1291
1292 vm = a->vm;
1293 p = a->process;
Damjan Marioncea46522020-05-21 16:47:05 +02001294 vlib_process_finish_switch_stack (vm);
1295
Ed Warnickecb9cada2015-12-08 15:45:58 -07001296 f = a->frame;
1297 node = &p->node_runtime;
1298
1299 n = node->function (vm, node, f);
1300
1301 ASSERT (vlib_process_stack_is_valid (p));
1302
Damjan Marioncea46522020-05-21 16:47:05 +02001303 vlib_process_start_switch_stack (vm, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001304 clib_longjmp (&p->return_longjmp, n);
1305
1306 return n;
1307}
1308
1309/* Called in main stack. */
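/* First-time launch of a process: arm return_longjmp, switch to the
   process stack and run the bootstrap. Returns the node function's return
   value, or VLIB_PROCESS_RETURN_LONGJMP_SUSPEND if the process suspended. */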
1310static_always_inline uword
Dave Barach9b8ffd92016-07-08 08:13:45 -04001311vlib_process_startup (vlib_main_t * vm, vlib_process_t * p, vlib_frame_t * f)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001312{
1313 vlib_process_bootstrap_args_t a;
1314 uword r;
1315
1316 a.vm = vm;
1317 a.process = p;
1318 a.frame = f;
1319
1320 r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1321 if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
Damjan Marioncea46522020-05-21 16:47:05 +02001322 {
1323 vlib_process_start_switch_stack (vm, p);
1324 r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
1325 (void *) p->stack + (1 << p->log2_n_stack_bytes));
1326 }
1327 else
1328 vlib_process_finish_switch_stack (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001329
1330 return r;
1331}
1332
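/* Resume a suspended process: clear its wait flags and longjmp back into
   the process via resume_longjmp. Control returns here when the process
   suspends again or finishes. */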
1333static_always_inline uword
Damjan Marioncea46522020-05-21 16:47:05 +02001334vlib_process_resume (vlib_main_t * vm, vlib_process_t * p)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001335{
1336 uword r;
1337 p->flags &= ~(VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1338 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
1339 | VLIB_PROCESS_RESUME_PENDING);
1340 r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1341 if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
Damjan Marioncea46522020-05-21 16:47:05 +02001342 {
1343 vlib_process_start_switch_stack (vm, p);
1344 clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
1345 }
1346 else
1347 vlib_process_finish_switch_stack (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001348 return r;
1349}
1350
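/* Run a process node from the dispatch loop. If it suspends (waiting for
   an event or for the clock), park its frame in the suspended-process-frame
   pool and, for clock waits, start a timing-wheel timer so the process can
   be resumed later by dispatch_suspended_process (). */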
1351static u64
1352dispatch_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001353 vlib_process_t * p, vlib_frame_t * f, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001354{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001355 vlib_node_main_t *nm = &vm->node_main;
1356 vlib_node_runtime_t *node_runtime = &p->node_runtime;
1357 vlib_node_t *node = vlib_get_node (vm, node_runtime->node_index);
Florin Corasfd542f12018-05-16 19:28:24 -07001358 u32 old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001359 u64 t;
1360 uword n_vectors, is_suspend;
1361
1362 if (node->state != VLIB_NODE_STATE_POLLING
1363 || (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1364 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT)))
1365 return last_time_stamp;
1366
1367 p->flags |= VLIB_PROCESS_IS_RUNNING;
1368
1369 t = last_time_stamp;
1370 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1371 f ? f->n_vectors : 0, /* is_after */ 0);
1372
1373 /* Save away current process for suspend. */
Florin Corasfd542f12018-05-16 19:28:24 -07001374 old_process_index = nm->current_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001375 nm->current_process_index = node->runtime_index;
1376
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001377 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1378 VLIB_NODE_RUNTIME_PERF_BEFORE);
1379
Ed Warnickecb9cada2015-12-08 15:45:58 -07001380 n_vectors = vlib_process_startup (vm, p, f);
1381
Florin Corasfd542f12018-05-16 19:28:24 -07001382 nm->current_process_index = old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001383
1384 ASSERT (n_vectors != VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1385 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1386 if (is_suspend)
1387 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001388 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001389
1390 n_vectors = 0;
1391 pool_get (nm->suspended_process_frames, pf);
1392 pf->node_runtime_index = node->runtime_index;
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001393 pf->frame = f;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001394 pf->next_frame_index = ~0;
1395
1396 p->n_suspends += 1;
1397 p->suspended_process_frame_index = pf - nm->suspended_process_frames;
1398
1399 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001400 {
1401 TWT (tw_timer_wheel) * tw =
1402 (TWT (tw_timer_wheel) *) nm->timing_wheel;
1403 p->stop_timer_handle =
1404 TW (tw_timer_start) (tw,
1405 vlib_timing_wheel_data_set_suspended_process
1406 (node->runtime_index) /* pool index */ ,
1407 0 /* timer_id */ ,
1408 p->resume_clock_interval);
1409 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001410 }
1411 else
1412 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
1413
1414 t = clib_cpu_time_now ();
1415
Dave Barach9b8ffd92016-07-08 08:13:45 -04001416 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, is_suspend,
1417 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001418
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001419 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1420 VLIB_NODE_RUNTIME_PERF_AFTER);
1421
Ed Warnickecb9cada2015-12-08 15:45:58 -07001422 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001423 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001424 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001425 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001426
1427 return t;
1428}
1429
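/* Dispatch a process node for the first time, e.g. a process created after
   the main loop has started. */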
Dave Barach9b8ffd92016-07-08 08:13:45 -04001430void
1431vlib_start_process (vlib_main_t * vm, uword process_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001432{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001433 vlib_node_main_t *nm = &vm->node_main;
1434 vlib_process_t *p = vec_elt (nm->processes, process_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001435 dispatch_process (vm, p, /* frame */ 0, /* cpu_time_now */ 0);
1436}
1437
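/* Resume a previously suspended process, driven by a timing-wheel
   expiration or an event signal. If it suspends again, restart its timer;
   otherwise return its saved frame slot to the pool. */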
1438static u64
1439dispatch_suspended_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001440 uword process_index, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001441{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001442 vlib_node_main_t *nm = &vm->node_main;
1443 vlib_node_runtime_t *node_runtime;
1444 vlib_node_t *node;
1445 vlib_frame_t *f;
1446 vlib_process_t *p;
1447 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001448 u64 t, n_vectors, is_suspend;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001449
Ed Warnickecb9cada2015-12-08 15:45:58 -07001450 t = last_time_stamp;
1451
1452 p = vec_elt (nm->processes, process_index);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001453 if (PREDICT_FALSE (!(p->flags & VLIB_PROCESS_IS_RUNNING)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001454 return last_time_stamp;
1455
1456 ASSERT (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1457 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT));
1458
Florin Coras221d6f12018-11-07 20:46:38 -08001459 pf = pool_elt_at_index (nm->suspended_process_frames,
1460 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001461
1462 node_runtime = &p->node_runtime;
1463 node = vlib_get_node (vm, node_runtime->node_index);
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001464 f = pf->frame;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001465
Dave Barach9b8ffd92016-07-08 08:13:45 -04001466 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1467 f ? f->n_vectors : 0, /* is_after */ 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001468
1469 /* Save away current process for suspend. */
1470 nm->current_process_index = node->runtime_index;
1471
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001472 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1473 VLIB_NODE_RUNTIME_PERF_BEFORE);
1474
Damjan Marioncea46522020-05-21 16:47:05 +02001475 n_vectors = vlib_process_resume (vm, p);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001476 t = clib_cpu_time_now ();
1477
1478 nm->current_process_index = ~0;
1479
1480 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1481 if (is_suspend)
1482 {
1483 /* Suspend it again. */
1484 n_vectors = 0;
1485 p->n_suspends += 1;
1486 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001487 {
1488 p->stop_timer_handle =
1489 TW (tw_timer_start) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
1490 vlib_timing_wheel_data_set_suspended_process
1491 (node->runtime_index) /* pool index */ ,
1492 0 /* timer_id */ ,
1493 p->resume_clock_interval);
1494 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001495 }
1496 else
1497 {
1498 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
Florin Coras221d6f12018-11-07 20:46:38 -08001499 pool_put_index (nm->suspended_process_frames,
1500 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001501 p->suspended_process_frame_index = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001502 }
1503
1504 t = clib_cpu_time_now ();
Dave Barach9b8ffd92016-07-08 08:13:45 -04001505 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, !is_suspend,
1506 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001507
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001508 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1509 VLIB_NODE_RUNTIME_PERF_AFTER);
1510
Ed Warnickecb9cada2015-12-08 15:45:58 -07001511 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001512 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001513 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001514 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001515
1516 return t;
1517}
1518
Dave Barach2877eee2017-12-15 12:22:57 -05001519void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
1520void
1521vl_api_send_pending_rpc_requests (vlib_main_t * vm)
1522{
1523}
1524
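/* The shared dispatch loop for the main thread (is_main = 1) and worker
   threads. Each iteration polls pre-input and input nodes, services
   pending input-node interrupts, drains the pending-frame vector and, on
   the main thread only, expires the process timing wheel and dispatches
   any processes that became runnable. */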
Damjan Marione9d52d52017-03-09 15:42:26 +01001525static_always_inline void
1526vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001527{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001528 vlib_node_main_t *nm = &vm->node_main;
Damjan Marione9d52d52017-03-09 15:42:26 +01001529 vlib_thread_main_t *tm = vlib_get_thread_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001530 uword i;
1531 u64 cpu_time_now;
Dave Barach000a0292020-02-17 17:07:12 -05001532 f64 now;
Damjan Marione9d52d52017-03-09 15:42:26 +01001533 vlib_frame_queue_main_t *fqm;
Dave Barach80965f52019-03-11 09:57:38 -04001534 u32 frame_queue_check_counter = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001535
1536 /* Initialize pending frame vector. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001537 if (is_main)
1538 {
1539 vec_resize (nm->pending_frames, 32);
1540 _vec_len (nm->pending_frames) = 0;
1541 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001542
1543 /* Mark time of main loop start. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001544 if (is_main)
1545 {
1546 cpu_time_now = vm->clib_time.last_cpu_time;
1547 vm->cpu_time_main_loop_start = cpu_time_now;
1548 }
1549 else
1550 cpu_time_now = clib_cpu_time_now ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001551
Damjan Marion2c2b6402017-03-28 14:16:15 +02001552 /* Pre-allocate interrupt runtime indices and lock. */
Damjan Marion94100532020-11-06 23:25:57 +01001553 vec_alloc_aligned (nm->pending_interrupts, 1, CLIB_CACHE_LINE_BYTES);
Damjan Marion2c2b6402017-03-28 14:16:15 +02001554
1555 /* Set default polling / interrupt threshold vector lengths. */
Steven7312cc72017-03-15 21:18:55 -07001556 if (!nm->polling_threshold_vector_length)
1557 nm->polling_threshold_vector_length = 10;
1558 if (!nm->interrupt_threshold_vector_length)
1559 nm->interrupt_threshold_vector_length = 5;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001560
Damjan Marion29c0b332019-01-28 13:41:27 +01001561 vm->cpu_id = clib_get_current_cpu_id ();
1562 vm->numa_node = clib_get_current_numa_node ();
Florin Coras4c959952020-02-09 18:09:31 +00001563 os_set_numa_index (vm->numa_node);
Damjan Marion29c0b332019-01-28 13:41:27 +01001564
Ed Warnickecb9cada2015-12-08 15:45:58 -07001565 /* Start all processes. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001566 if (is_main)
1567 {
1568 uword i;
Dave Barachc602b382019-06-03 19:48:22 -04001569
1570 /*
1571 * Perform an initial barrier sync. Pays no attention to
1572 * the barrier sync hold-down timer scheme, which won't work
1573 * at this point in time.
1574 */
1575 vlib_worker_thread_initial_barrier_sync_and_release (vm);
1576
Stevenf3b53642017-05-01 14:03:02 -07001577 nm->current_process_index = ~0;
Damjan Marione9d52d52017-03-09 15:42:26 +01001578 for (i = 0; i < vec_len (nm->processes); i++)
1579 cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
1580 cpu_time_now);
1581 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001582
1583 while (1)
1584 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001585 vlib_node_runtime_t *n;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001586
Dave Barach2877eee2017-12-15 12:22:57 -05001587 if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
Dave Barachf6c68d72018-11-01 08:12:52 -04001588 {
1589 if (!is_main)
1590 vl_api_send_pending_rpc_requests (vm);
1591 }
Dave Barach2877eee2017-12-15 12:22:57 -05001592
Damjan Marione9d52d52017-03-09 15:42:26 +01001593 if (!is_main)
Damjan Marionf6e6c782020-09-17 09:54:07 +02001594 vlib_worker_thread_barrier_check ();
1595
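      /* Poll the handoff frame queues. Once work is signalled or found,
         keep polling for another 100 iterations before backing off. */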
1596 if (PREDICT_FALSE (vm->check_frame_queues + frame_queue_check_counter))
Damjan Marione9d52d52017-03-09 15:42:26 +01001597 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001598 u32 processed = 0;
1599
1600 if (vm->check_frame_queues)
Dave Barach80965f52019-03-11 09:57:38 -04001601 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001602 frame_queue_check_counter = 100;
1603 vm->check_frame_queues = 0;
Dave Barach80965f52019-03-11 09:57:38 -04001604 }
Damjan Marionf6e6c782020-09-17 09:54:07 +02001605
1606 vec_foreach (fqm, tm->frame_queue_mains)
1607 processed += vlib_frame_queue_dequeue (vm, fqm);
1608
1609 /* Keep polling while handoff work is found; otherwise back off. */
1610 if (processed)
1611 frame_queue_check_counter = 100;
1612 else
1613 frame_queue_check_counter--;
Damjan Marione9d52d52017-03-09 15:42:26 +01001614 }
1615
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001616 if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
1617 clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm,
1618 cpu_time_now);
1619
Ed Warnickecb9cada2015-12-08 15:45:58 -07001620 /* Process pre-input nodes. */
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001621 cpu_time_now = clib_cpu_time_now ();
Damjan Marionceab7882018-01-19 20:56:12 +01001622 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
1623 cpu_time_now = dispatch_node (vm, n,
1624 VLIB_NODE_TYPE_PRE_INPUT,
1625 VLIB_NODE_STATE_POLLING,
1626 /* frame */ 0,
1627 cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001628
1629 /* Next process input nodes. */
1630 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1631 cpu_time_now = dispatch_node (vm, n,
1632 VLIB_NODE_TYPE_INPUT,
1633 VLIB_NODE_STATE_POLLING,
1634 /* frame */ 0,
1635 cpu_time_now);
1636
Damjan Marione9d52d52017-03-09 15:42:26 +01001637 if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001638 vm->queue_signal_callback (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001639
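      /* Service input nodes running in interrupt mode: walk the
         pending-interrupt set and dispatch each flagged input node once
         in interrupt state. */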
Damjan Marion94100532020-11-06 23:25:57 +01001640 if (__atomic_load_n (nm->pending_interrupts, __ATOMIC_ACQUIRE))
Damjan Marion0b316302020-09-09 18:55:16 +02001641 {
Damjan Marion94100532020-11-06 23:25:57 +01001642 int int_num = -1;
1643 *nm->pending_interrupts = 0;
Dave Barachd47c5092018-01-19 13:09:20 -05001644
Damjan Marion94100532020-11-06 23:25:57 +01001645 while ((int_num =
1646 clib_interrupt_get_next (nm->interrupts, int_num)) != -1)
1647 {
1648 vlib_node_runtime_t *n;
1649 clib_interrupt_clear (nm->interrupts, int_num);
1650 n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1651 int_num);
1652 cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
1653 VLIB_NODE_STATE_INTERRUPT,
1654 /* frame */ 0, cpu_time_now);
1655 }
Damjan Marion1033b492020-06-03 12:20:41 +02001656 }
1657
Dave Barache3248982018-08-14 13:47:58 -04001658 /* Input nodes may have added work to the pending vector.
1659 Process pending vector until there is nothing left.
1660 All pending vectors will be processed from input -> output. */
1661 for (i = 0; i < _vec_len (nm->pending_frames); i++)
1662 cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
1663 /* Reset pending vector for next iteration. */
1664 _vec_len (nm->pending_frames) = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001665
Damjan Marione9d52d52017-03-09 15:42:26 +01001666 if (is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001667 {
Dave Barach900cbad2019-01-31 19:12:51 -05001668 /* *INDENT-OFF* */
1669 ELOG_TYPE_DECLARE (es) =
1670 {
1671 .format = "process tw start",
1672 .format_args = "",
1673 };
1674 ELOG_TYPE_DECLARE (ee) =
1675 {
1676 .format = "process tw end: %d",
1677 .format_args = "i4",
1678 };
1679 /* *INDENT-ON* */
1680
1681 struct
1682 {
1683 int nready_procs;
1684 } *ed;
1685
Damjan Marione9d52d52017-03-09 15:42:26 +01001686 /* Check if process nodes have expired from timing wheel. */
Dave Barach5c20a012017-06-13 08:48:31 -04001687 ASSERT (nm->data_from_advancing_timing_wheel != 0);
1688
Dave Barach900cbad2019-01-31 19:12:51 -05001689 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1690 ed = ELOG_DATA (&vlib_global_main.elog_main, es);
1691
Dave Barach5c20a012017-06-13 08:48:31 -04001692 nm->data_from_advancing_timing_wheel =
1693 TW (tw_timer_expire_timers_vec)
1694 ((TWT (tw_timer_wheel) *) nm->timing_wheel, vlib_time_now (vm),
1695 nm->data_from_advancing_timing_wheel);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001696
Damjan Marione9d52d52017-03-09 15:42:26 +01001697 ASSERT (nm->data_from_advancing_timing_wheel != 0);
Dave Barach5c20a012017-06-13 08:48:31 -04001698
Dave Barach900cbad2019-01-31 19:12:51 -05001699 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1700 {
1701 ed = ELOG_DATA (&vlib_global_main.elog_main, ee);
1702 ed->nready_procs =
1703 _vec_len (nm->data_from_advancing_timing_wheel);
1704 }
1705
Damjan Marione9d52d52017-03-09 15:42:26 +01001706 if (PREDICT_FALSE
1707 (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001708 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001709 uword i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001710
Damjan Marione9d52d52017-03-09 15:42:26 +01001711 for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
1712 i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001713 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001714 u32 d = nm->data_from_advancing_timing_wheel[i];
1715 u32 di = vlib_timing_wheel_data_get_index (d);
1716
1717 if (vlib_timing_wheel_data_is_timed_event (d))
1718 {
1719 vlib_signal_timed_event_data_t *te =
1720 pool_elt_at_index (nm->signal_timed_event_data_pool,
1721 di);
1722 vlib_node_t *n =
1723 vlib_get_node (vm, te->process_node_index);
1724 vlib_process_t *p =
1725 vec_elt (nm->processes, n->runtime_index);
1726 void *data;
1727 data =
1728 vlib_process_signal_event_helper (nm, n, p,
1729 te->event_type_index,
1730 te->n_data_elts,
1731 te->n_data_elt_bytes);
1732 if (te->n_data_bytes < sizeof (te->inline_event_data))
Dave Barach178cf492018-11-13 16:34:13 -05001733 clib_memcpy_fast (data, te->inline_event_data,
1734 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001735 else
1736 {
Dave Barach178cf492018-11-13 16:34:13 -05001737 clib_memcpy_fast (data, te->event_data_as_vector,
1738 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001739 vec_free (te->event_data_as_vector);
1740 }
1741 pool_put (nm->signal_timed_event_data_pool, te);
1742 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001743 else
1744 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001745 cpu_time_now = clib_cpu_time_now ();
1746 cpu_time_now =
1747 dispatch_suspended_process (vm, di, cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001748 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001749 }
Damjan Marione9d52d52017-03-09 15:42:26 +01001750 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
1751 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001752 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001753 vlib_increment_main_loop_counter (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001754 /* Record time stamp in case there are no enabled nodes and above
Dave Barach9b8ffd92016-07-08 08:13:45 -04001755 calls do not update time stamp. */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001756 cpu_time_now = clib_cpu_time_now ();
Dave Barach000a0292020-02-17 17:07:12 -05001757 vm->loops_this_reporting_interval++;
1758 now = clib_time_now_internal (&vm->clib_time, cpu_time_now);
1759 /* Time to update loops_per_second? */
1760 if (PREDICT_FALSE (now >= vm->loop_interval_end))
1761 {
1762 /* Compute the rate for the sampling interval that just ended */
1763 if (vm->loop_interval_start)
1764 {
1765 f64 this_loops_per_second;
1766
1767 this_loops_per_second =
1768 ((f64) vm->loops_this_reporting_interval) / (now -
1769 vm->loop_interval_start);
1770
1771 vm->loops_per_second =
1772 vm->loops_per_second * vm->damping_constant +
1773 (1.0 - vm->damping_constant) * this_loops_per_second;
1774 if (vm->loops_per_second != 0.0)
1775 vm->seconds_per_loop = 1.0 / vm->loops_per_second;
1776 else
1777 vm->seconds_per_loop = 0.0;
1778 }
1779 /* A new sampling interval starts now */
1780 vm->loop_interval_start = now;
1781 vm->loop_interval_end = now + 2e-4;
1782 vm->loops_this_reporting_interval = 0;
1783 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001784 }
1785}
Dave Barach9b8ffd92016-07-08 08:13:45 -04001786
Damjan Marione9d52d52017-03-09 15:42:26 +01001787static void
1788vlib_main_loop (vlib_main_t * vm)
1789{
1790 vlib_main_or_worker_loop (vm, /* is_main */ 1);
1791}
1792
1793void
1794vlib_worker_loop (vlib_main_t * vm)
1795{
1796 vlib_main_or_worker_loop (vm, /* is_main */ 0);
1797}
1798
Damjan Marionfd8deb42021-03-06 12:26:28 +01001799vlib_global_main_t vlib_global_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001800
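/* Add or remove a post-mortem (crash-time) callback; duplicate
   registrations are ignored. */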
Damjan Marion25ab6c52021-03-05 14:41:25 +01001801void
1802vlib_add_del_post_mortem_callback (void *cb, int is_add)
1803{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001804 vlib_global_main_t *vgm = vlib_get_global_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001805 int i;
1806
1807 if (is_add == 0)
1808 {
Damjan Marionfd8deb42021-03-06 12:26:28 +01001809 for (i = vec_len (vgm->post_mortem_callbacks) - 1; i >= 0; i--)
1810 if (vgm->post_mortem_callbacks[i] == cb)
1811 vec_del1 (vgm->post_mortem_callbacks, i);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001812 return;
1813 }
1814
Damjan Marionfd8deb42021-03-06 12:26:28 +01001815 for (i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
1816 if (vgm->post_mortem_callbacks[i] == cb)
Damjan Marion25ab6c52021-03-05 14:41:25 +01001817 return;
Damjan Marionfd8deb42021-03-06 12:26:28 +01001818 vec_add1 (vgm->post_mortem_callbacks, cb);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001819}
1820
1821static void
1822elog_post_mortem_dump (void)
1823{
Damjan Marionf553a2c2021-03-26 13:45:37 +01001824 elog_main_t *em = vlib_get_elog_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001825
1826 u8 *filename;
1827 clib_error_t *error;
1828
1829 filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
1830 error = elog_write_file (em, (char *) filename, 1 /* flush ring */);
1831 if (error)
1832 clib_error_report (error);
1833 /*
1834 * We're in the middle of crashing. Don't try to free the filename.
1835 */
1836}
1837
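/* Parse the "vlib" configuration section: memory tracing, event-log ring
   size, post-mortem event-log dump and, when compiled in, the buffer
   allocator fault-injection parameters. */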
Ed Warnickecb9cada2015-12-08 15:45:58 -07001838static clib_error_t *
1839vlib_main_configure (vlib_main_t * vm, unformat_input_t * input)
1840{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001841 vlib_global_main_t *vgm = vlib_get_global_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001842 int turn_on_mem_trace = 0;
1843
1844 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1845 {
1846 if (unformat (input, "memory-trace"))
1847 turn_on_mem_trace = 1;
1848
1849 else if (unformat (input, "elog-events %d",
Damjan Marionfd8deb42021-03-06 12:26:28 +01001850 &vgm->configured_elog_ring_size))
1851 vgm->configured_elog_ring_size =
1852 1 << max_log2 (vgm->configured_elog_ring_size);
Dave Barach81481312017-05-16 09:08:14 -04001853 else if (unformat (input, "elog-post-mortem-dump"))
Damjan Marion25ab6c52021-03-05 14:41:25 +01001854 vlib_add_del_post_mortem_callback (elog_post_mortem_dump,
1855 /* is_add */ 1);
Dave Barachc74b43c2020-04-09 17:24:07 -04001856 else if (unformat (input, "buffer-alloc-success-rate %f",
1857 &vm->buffer_alloc_success_rate))
1858 {
1859 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1860 return clib_error_return
1861 (0, "Buffer fault injection not configured");
1862 }
1863 else if (unformat (input, "buffer-alloc-success-seed %u",
1864 &vm->buffer_alloc_success_seed))
1865 {
1866 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1867 return clib_error_return
1868 (0, "Buffer fault injection not configured");
1869 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001870 else
1871 return unformat_parse_error (input);
1872 }
1873
1874 unformat_free (input);
1875
1876 /* Enable memory trace as early as possible. */
1877 if (turn_on_mem_trace)
1878 clib_mem_trace (1);
1879
1880 return 0;
1881}
1882
1883VLIB_EARLY_CONFIG_FUNCTION (vlib_main_configure, "vlib");
1884
Dave Barach9b8ffd92016-07-08 08:13:45 -04001885static void
Dave Barach11fb09e2020-08-06 12:10:09 -04001886placeholder_queue_signal_callback (vlib_main_t * vm)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001887{
1888}
Dave Barach16c75df2016-05-31 14:05:46 -04001889
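/* Weak no-op stubs for API / stat-segment initializers, used when vlib is
   linked without the corresponding libraries; real implementations
   override them. */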
Dave Barach1f806582018-06-14 09:18:21 -04001890#define foreach_weak_reference_stub \
1891_(vlib_map_stat_segment_init) \
1892_(vpe_api_init) \
1893_(vlibmemory_init) \
1894_(map_api_segment_init)
1895
1896#define _(name) \
1897clib_error_t *name (vlib_main_t *vm) __attribute__((weak)); \
1898clib_error_t *name (vlib_main_t *vm) { return 0; }
1899foreach_weak_reference_stub;
1900#undef _
1901
Dave Barachb09f4d02019-07-15 16:00:03 -04001902void vl_api_set_elog_main (elog_main_t * m) __attribute__ ((weak));
1903void
1904vl_api_set_elog_main (elog_main_t * m)
1905{
1906 clib_warning ("STUB");
1907}
1908
1909int vl_api_set_elog_trace_api_messages (int enable) __attribute__ ((weak));
1910int
1911vl_api_set_elog_trace_api_messages (int enable)
1912{
1913 clib_warning ("STUB");
1914 return 0;
1915}
1916
1917int vl_api_get_elog_trace_api_messages (void) __attribute__ ((weak));
1918int
1919vl_api_get_elog_trace_api_messages (void)
1920{
1921 clib_warning ("STUB");
1922 return 0;
1923}
1924
Ed Warnickecb9cada2015-12-08 15:45:58 -07001925/* Main function. */
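/* One-time initialization: physmem, stat segment, buffers, threads, the
   node graph, init/config functions and the process timing wheel, then
   enter the dispatch loop. Returns only after the main loop exits. */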
Dave Barach9b8ffd92016-07-08 08:13:45 -04001926int
Eyal Barid334a6b2016-09-19 10:23:39 +03001927vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001928{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001929 vlib_global_main_t *vgm = vlib_get_global_main ();
Eyal Barid334a6b2016-09-19 10:23:39 +03001930 clib_error_t *volatile error;
Dave Barach5c20a012017-06-13 08:48:31 -04001931 vlib_node_main_t *nm = &vm->node_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001932
Dave Barach11fb09e2020-08-06 12:10:09 -04001933 vm->queue_signal_callback = placeholder_queue_signal_callback;
Dave Barach16c75df2016-05-31 14:05:46 -04001934
Dave Barachbc867c32020-11-25 10:07:09 -05001935 /* Reconfigure event log which is enabled very early */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001936 if (vgm->configured_elog_ring_size &&
1937 vgm->configured_elog_ring_size != vgm->elog_main.event_ring_size)
1938 elog_resize (&vgm->elog_main, vgm->configured_elog_ring_size);
Damjan Marionf553a2c2021-03-26 13:45:37 +01001939 vl_api_set_elog_main (vlib_get_elog_main ());
Dave Barachb09f4d02019-07-15 16:00:03 -04001940 (void) vl_api_set_elog_trace_api_messages (1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001941
1942 /* Default name. */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001943 if (!vgm->name)
1944 vgm->name = "VLIB";
Ed Warnickecb9cada2015-12-08 15:45:58 -07001945
Damjan Marion68b4da62018-09-30 18:26:20 +02001946 if ((error = vlib_physmem_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001947 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001948 clib_error_report (error);
1949 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001950 }
Damjan Marion49d66f12017-07-20 18:10:35 +02001951
Filip Tehlard2bbdef2019-02-22 05:05:53 -08001952 if ((error = vlib_map_stat_segment_init (vm)))
1953 {
1954 clib_error_report (error);
1955 goto done;
1956 }
1957
Damjan Marion49d66f12017-07-20 18:10:35 +02001958 if ((error = vlib_buffer_main_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001959 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001960 clib_error_report (error);
1961 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001962 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001963
1964 if ((error = vlib_thread_init (vm)))
1965 {
1966 clib_error_report (error);
1967 goto done;
1968 }
1969
Damjan Mariona31698b2021-03-10 14:35:28 +01001970 /* Register per-march node function variants */
1971 vlib_register_all_node_march_variants (vm);
1972
Ed Warnickecb9cada2015-12-08 15:45:58 -07001973 /* Register static nodes so that init functions may use them. */
1974 vlib_register_all_static_nodes (vm);
1975
1976 /* Set seed for random number generator.
1977 Allow user to specify seed to make random sequence deterministic. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001978 if (!unformat (input, "seed %wd", &vm->random_seed))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001979 vm->random_seed = clib_cpu_time_now ();
1980 clib_random_buffer_init (&vm->random_buffer, vm->random_seed);
1981
Ed Warnickecb9cada2015-12-08 15:45:58 -07001982 /* Initialize node graph. */
1983 if ((error = vlib_node_main_init (vm)))
1984 {
1985 /* Arrange for graph hook-up errors to be non-fatal when debugging. */
1986 if (CLIB_DEBUG > 0)
1987 clib_error_report (error);
1988 else
1989 goto done;
1990 }
1991
Dave Barach1f806582018-06-14 09:18:21 -04001992 /* Direct call / weak reference, for vlib standalone use-cases */
1993 if ((error = vpe_api_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001994 {
1995 clib_error_report (error);
1996 goto done;
1997 }
1998
Dave Barach1f806582018-06-14 09:18:21 -04001999 if ((error = vlibmemory_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04002000 {
2001 clib_error_report (error);
2002 goto done;
2003 }
2004
Dave Barach1f806582018-06-14 09:18:21 -04002005 if ((error = map_api_segment_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04002006 {
2007 clib_error_report (error);
2008 goto done;
2009 }
2010
Ole Troan964f93e2016-06-10 13:22:36 +02002011 /* See unix/main.c; most likely already set up */
Damjan Marionfd8deb42021-03-06 12:26:28 +01002012 if (vgm->init_functions_called == 0)
2013 vgm->init_functions_called = hash_create (0, /* value bytes */ 0);
Ole Troan964f93e2016-06-10 13:22:36 +02002014 if ((error = vlib_call_all_init_functions (vm)))
2015 goto done;
2016
Dave Barach5c20a012017-06-13 08:48:31 -04002017 nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
2018 CLIB_CACHE_LINE_BYTES);
2019
2020 vec_validate (nm->data_from_advancing_timing_wheel, 10);
2021 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
2022
2023 /* Create the process timing wheel */
2024 TW (tw_timer_wheel_init) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
2025 0 /* no callback */ ,
2026 10e-6 /* timer period 10us */ ,
2027 ~0 /* max expirations per call */ );
2028
Dave Barach2877eee2017-12-15 12:22:57 -05002029 vec_validate (vm->pending_rpc_requests, 0);
2030 _vec_len (vm->pending_rpc_requests) = 0;
Dave Barachf6c68d72018-11-01 08:12:52 -04002031 vec_validate (vm->processing_rpc_requests, 0);
2032 _vec_len (vm->processing_rpc_requests) = 0;
Dave Barach2877eee2017-12-15 12:22:57 -05002033
Dave Barachc74b43c2020-04-09 17:24:07 -04002034 /* Default params for the buffer allocator fault injector, if configured */
2035 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
2036 {
2037 vm->buffer_alloc_success_seed = 0xdeaddabe;
2038 vm->buffer_alloc_success_rate = 0.80;
2039 }
2040
Dave Barachd1e17d02019-03-21 18:01:48 -04002041 if ((error = vlib_call_all_config_functions (vm, input, 0 /* is_early */ )))
2042 goto done;
2043
Dave Barach000a0292020-02-17 17:07:12 -05002044 /*
2045 * Use exponential smoothing, with a half-life of 1 second
2046 * reported_rate(t) = reported_rate(t-1) * K + rate(t)*(1-K)
2047 *
2048 * Sample every 20ms, aka 50 samples per second
2049 * K = exp (-1.0/20.0);
2050 * K = 0.95
2051 */
2052 vm->damping_constant = exp (-1.0 / 20.0);
2053
Dave Barachc602b382019-06-03 19:48:22 -04002054 /* Sort per-thread init functions before we start threads */
Damjan Marionfd8deb42021-03-06 12:26:28 +01002055 vlib_sort_init_exit_functions (&vgm->worker_init_function_registrations);
Dave Barachc602b382019-06-03 19:48:22 -04002056
Dave Barachd1e17d02019-03-21 18:01:48 -04002057 /* Call all main loop enter functions. */
2058 {
2059 clib_error_t *sub_error;
2060 sub_error = vlib_call_all_main_loop_enter_functions (vm);
2061 if (sub_error)
2062 clib_error_report (sub_error);
2063 }
2064
Ed Warnickecb9cada2015-12-08 15:45:58 -07002065 switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
2066 {
2067 case VLIB_MAIN_LOOP_EXIT_NONE:
2068 vm->main_loop_exit_set = 1;
2069 break;
2070
2071 case VLIB_MAIN_LOOP_EXIT_CLI:
2072 goto done;
2073
2074 default:
2075 error = vm->main_loop_error;
2076 goto done;
2077 }
2078
Ed Warnickecb9cada2015-12-08 15:45:58 -07002079 vlib_main_loop (vm);
2080
Dave Barach9b8ffd92016-07-08 08:13:45 -04002081done:
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302082 vlib_worker_thread_barrier_sync (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002083 /* Call all exit functions. */
2084 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04002085 clib_error_t *sub_error;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002086 sub_error = vlib_call_all_main_loop_exit_functions (vm);
2087 if (sub_error)
2088 clib_error_report (sub_error);
2089 }
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302090 vlib_worker_thread_barrier_release (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002091
2092 if (error)
2093 clib_error_report (error);
2094
2095 return 0;
2096}
Dave Barach9b8ffd92016-07-08 08:13:45 -04002097
Dave Barachab1a50c2020-10-06 14:08:16 -04002098vlib_main_t *
2099vlib_get_main_not_inline (void)
2100{
2101 return vlib_get_main ();
2102}
2103
2104elog_main_t *
2105vlib_get_elog_main_not_inline ()
2106{
2107 return &vlib_global_main.elog_main;
2108}
2109
Dave Barach9b8ffd92016-07-08 08:13:45 -04002110/*
2111 * fd.io coding-style-patch-verification: ON
2112 *
2113 * Local Variables:
2114 * eval: (c-set-style "gnu")
2115 * End:
2116 */