/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>

#include <vlib/unix/unix.h>

#define VLIB_FRAME_MAGIC (0xabadc0ed)

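/*
 * Layout note (summarizing the code in this file): a vlib_frame_t header is
 * followed, at per-node offsets recorded in vlib_node_t, by the scalar
 * arguments, the vector of buffer indices, and optional per-buffer aux data.
 * A magic word is written at node->magic_offset so validate_frame_magic ()
 * can detect callers that overrun the frame arguments.
 */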
always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  return (void *) f + node->magic_offset;
}

static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
                          u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n;

  ASSERT (vm == vlib_get_main ());

  to_node = vlib_get_node (vm, to_node_index);

  vec_validate (nm->frame_sizes, to_node->frame_size_index);
  fs = vec_elt_at_index (nm->frame_sizes, to_node->frame_size_index);

  if (fs->frame_size == 0)
    fs->frame_size = to_node->frame_size;
  else
    ASSERT (fs->frame_size == to_node->frame_size);

  n = fs->frame_size;
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      _vec_len (fs->free_frames) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, CLIB_CACHE_LINE_BYTES);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset_u8 (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_offset = to_node->scalar_offset;
  f->vector_offset = to_node->vector_offset;
  f->aux_offset = to_node->aux_offset;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns a pointer to the allocated frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
                  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
                                   /* frame_flags */ 0);
}

vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
                                              /* frame_flags */
                                              VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

static inline void
vlib_validate_frame_indices (vlib_frame_t * f)
{
  if (CLIB_DEBUG > 0)
    {
      int i;
      u32 *from = vlib_frame_vector_args (f);

      /* Check for bad buffer index values */
      for (i = 0; i < f->n_vectors; i++)
        {
          if (from[i] == 0)
            {
              clib_warning ("BUG: buffer index 0 at index %d", i);
              ASSERT (0);
            }
          else if (from[i] == 0xfefefefe)
            {
              clib_warning ("BUG: frame poison pattern at index %d", i);
              ASSERT (0);
            }
        }
    }
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  ASSERT (vm == vlib_get_main ());

  vlib_validate_frame_indices (f);

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}
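/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): enqueue a single, previously allocated buffer directly to a named
 * node.  The node name and the buffer index bi are assumptions made only
 * for the example.
 *
 *   vlib_node_t *n = vlib_get_node_by_name (vm, (u8 *) "ethernet-input");
 *   vlib_frame_t *to_frame = vlib_get_frame_to_node (vm, n->index);
 *   u32 *to_next = vlib_frame_vector_args (to_frame);
 *   to_next[0] = bi;
 *   to_frame->n_vectors = 1;
 *   vlib_put_frame_to_node (vm, n->index, to_frame);
 */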

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (vm == vlib_get_main ());
  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = vec_elt_at_index (nm->frame_sizes, node->frame_size_index);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=8s%=6s%=12s%=12s", "Thread", "Size", "# Alloc",
                   "# Free");
  foreach_vlib_main ()
    {
      vlib_node_main_t *nm = &this_vlib_main->node_main;
      vec_foreach (fs, nm->frame_sizes)
        {
          u32 n_alloc = fs->n_alloc_frames;
          u32 n_free = vec_len (fs->free_frames);

          if (n_alloc + n_free > 0)
            vlib_cli_output (vm, "%=8d%=6d%=12d%=12d",
                             this_vlib_main->thread_index, fs->frame_size,
                             n_alloc, n_free);
        }
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */
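/*
 * Example output of "show vlib frame-allocation" (values are illustrative
 * only; the column layout follows the format strings above):
 *
 *    Thread  Size    # Alloc     # Free
 *         0  2112          5          2
 */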

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
                                  vlib_node_runtime_t * node_runtime,
                                  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
          || node->type == VLIB_NODE_TYPE_INPUT
          || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
        vlib_node_get_next_frame (vm,
                                  next_node->owner_node_index,
                                  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
        {
          vlib_pending_frame_t *p;
          if (next_frame->frame != NULL)
            {
              vec_foreach (p, nm->pending_frames)
                {
                  if (p->frame == next_frame->frame)
                    {
                      p->next_frame_index =
                        next_frame - vm->node_main.next_frames;
                    }
                }
            }
        }
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
                      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
        {
          vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
          f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
        }

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
                            vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}
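/*
 * Note (editorial): this function is normally reached through the
 * vlib_get_next_frame macro (see vlib/node_funcs.h) rather than being called
 * directly; the macro hands the node function a (to_next, n_left_to_next)
 * pair derived from the returned frame.
 */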

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
                              vlib_node_runtime_t * rt,
                              u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);

  vlib_validate_frame_indices (f);

  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                              nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
        {
          clib_warning ("%v", msg);
          ASSERT (0);
        }
      vec_free (msg);
    }
}

void
vlib_put_next_frame (vlib_main_t * vm,
                     vlib_node_runtime_t * r,
                     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there. Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
        {
          __attribute__ ((unused)) vlib_node_t *node;
          vlib_node_t *next_node;
          vlib_node_runtime_t *next_runtime;

          node = vlib_get_node (vm, r->node_index);
          next_node = vlib_get_next_node (vm, r->node_index, next_index);
          next_runtime = vlib_node_get_runtime (vm, next_node->index);

          vec_add2 (nm->pending_frames, p, 1);

          p->frame = nf->frame;
          p->node_runtime_index = nf->node_runtime_index;
          p->next_frame_index = nf - nm->next_frames;
          nf->flags |= VLIB_FRAME_PENDING;
          f->frame_flags |= VLIB_FRAME_PENDING;

          /*
           * If we're going to dispatch this frame on another thread,
           * force allocation of a new frame. Otherwise, we create
           * a dangling frame reference. Each thread has its own copy of
           * the next_frames vector.
           */
          if (0 && r->thread_index != next_runtime->thread_index)
            {
              nf->frame = NULL;
              nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
            }
        }

      /* Copy trace flag from next_frame and from runtime. */
      nf->flags |=
        (nf->flags & VLIB_NODE_FLAG_TRACE) | (r->flags & VLIB_NODE_FLAG_TRACE);

      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;
      if (PREDICT_FALSE (v1 < v0))
        {
          vlib_node_t *node = vlib_get_node (vm, r->node_index);
          vec_elt (node->n_vectors_by_next_node, next_index) += v0;
        }
    }
}
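/*
 * Illustrative enqueue-loop sketch (editorial addition): this is the usual
 * pattern in node dispatch functions, built on the vlib_get_next_frame /
 * vlib_put_next_frame macros, which call the two functions above.  The
 * next_index and the from/n_left_from bookkeeping are assumptions made for
 * the example.
 *
 *   u32 *to_next, n_left_to_next;
 *   vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 *   while (n_left_from > 0 && n_left_to_next > 0)
 *     {
 *       to_next[0] = from[0];
 *       to_next += 1, from += 1;
 *       n_left_to_next -= 1, n_left_from -= 1;
 *     }
 *   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 */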

/* Sync up runtime (32 bit counters) and main node stats (64 bit counters). */
void
vlib_node_runtime_sync_stats_node (vlib_node_t *n, vlib_node_runtime_t *r,
                                   uword n_calls, uword n_vectors,
                                   uword n_clocks)
{
  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

void
vlib_node_runtime_sync_stats (vlib_main_t *vm, vlib_node_runtime_t *r,
                              uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);
  vlib_node_runtime_sync_stats_node (n, r, n_calls, n_vectors, n_clocks);
}

always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
                         vlib_process_t * p,
                         uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != vlib_get_first_main ())
        return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt =
      vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
                        n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure. */
  {
    vlib_next_frame_t *nf;
    uword i;
    for (i = 0; i < rt->n_next_nodes; i++)
      {
        nf = vlib_node_runtime_get_next_frame (vm, rt, i);
        vec_elt (n->n_vectors_by_next_node, i) +=
          nf->vectors_since_last_overflow;
        nf->vectors_since_last_overflow = 0;
      }
  }
}

always_inline u32
vlib_node_runtime_update_stats (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                uword n_calls,
                                uword n_vectors, uword n_clocks)
{
  u32 ca0, ca1, v0, v1, cl0, cl1, r;

  cl0 = cl1 = node->clocks_since_last_overflow;
  ca0 = ca1 = node->calls_since_last_overflow;
  v0 = v1 = node->vectors_since_last_overflow;

  ca1 = ca0 + n_calls;
  v1 = v0 + n_vectors;
  cl1 = cl0 + n_clocks;

  node->calls_since_last_overflow = ca1;
  node->clocks_since_last_overflow = cl1;
  node->vectors_since_last_overflow = v1;

  node->max_clock_n = node->max_clock > n_clocks ?
    node->max_clock_n : n_vectors;
  node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks;

  r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);

  if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
    {
      node->calls_since_last_overflow = ca0;
      node->clocks_since_last_overflow = cl0;
      node->vectors_since_last_overflow = v0;

      vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
    }

  return r;
}
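/*
 * Overflow example (editorial note): the runtime keeps 32-bit
 * "since-last-overflow" counters.  If, say, calls_since_last_overflow is
 * 0xffffffff and one more call is added, ca1 wraps below ca0; the saved
 * values are restored and vlib_node_runtime_sync_stats folds the deltas
 * into the 64-bit stats_total counters of the vlib_node_t.
 */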

always_inline void
vlib_process_update_stats (vlib_main_t * vm,
                           vlib_process_t * p,
                           uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_update_stats (vm, &p->node_runtime,
                                  n_calls, n_vectors, n_clocks);
}

static clib_error_t *
vlib_cli_elog_clear (vlib_main_t * vm,
                     unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_reset_buffer (&vlib_global_main.elog_main);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_clear_cli, static) = {
  .path = "event-logger clear",
  .short_help = "Clear the event log",
  .function = vlib_cli_elog_clear,
};
/* *INDENT-ON* */

#ifdef CLIB_UNIX
static clib_error_t *
elog_save_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  char *file, *chroot_file;
  clib_error_t *error = 0;

  if (!unformat (input, "%s", &file))
    {
      vlib_cli_output (vm, "expected file name, got `%U'",
                       format_unformat_error, input);
      return 0;
    }

  /* It's fairly hard to get "../oopsie" through unformat; just in case */
  if (strstr (file, "..") || index (file, '/'))
    {
      vlib_cli_output (vm, "illegal characters in filename '%s'", file);
      return 0;
    }

  chroot_file = (char *) format (0, "/tmp/%s%c", file, 0);

  vec_free (file);

  vlib_cli_output (vm, "Saving %wd of %wd events to %s",
                   elog_n_events_in_buffer (em),
                   elog_buffer_capacity (em), chroot_file);

  vlib_worker_thread_barrier_sync (vm);
  error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
  vlib_worker_thread_barrier_release (vm);
  vec_free (chroot_file);
  return error;
}

void
vlib_post_mortem_dump (void)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();

  for (int i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
    (vgm->post_mortem_callbacks[i]) ();
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
  .path = "event-logger save",
  .short_help = "event-logger save <filename> (saves log in /tmp/<filename>)",
  .function = elog_save_buffer,
};
/* *INDENT-ON* */

static clib_error_t *
elog_stop (vlib_main_t * vm,
           unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = em->n_total_events;

  vlib_cli_output (vm, "Stopped the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_stop_cli, static) = {
  .path = "event-logger stop",
  .short_help = "Stop the event-logger",
  .function = elog_stop,
};
/* *INDENT-ON* */

static clib_error_t *
elog_restart (vlib_main_t * vm,
              unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = ~0;

  vlib_cli_output (vm, "Restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_restart_cli, static) = {
  .path = "event-logger restart",
  .short_help = "Restart the event-logger",
  .function = elog_restart,
};
/* *INDENT-ON* */

static clib_error_t *
elog_resize_command_fn (vlib_main_t * vm,
                        unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  u32 tmp;

  /* Stop the parade */
  elog_reset_buffer (em);

  if (unformat (input, "%d", &tmp))
    {
      elog_alloc (em, tmp);
      em->n_total_events_disable_limit = ~0;
    }
  else
    return clib_error_return (0, "Must specify how many events in the ring");

  vlib_cli_output (vm, "Resized ring and restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
  .path = "event-logger resize",
  .short_help = "event-logger resize <nnn>",
  .function = elog_resize_command_fn,
};
/* *INDENT-ON* */

#endif /* CLIB_UNIX */

static void
elog_show_buffer_internal (vlib_main_t * vm, u32 n_events_to_show)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  elog_event_t *e, *es;
  f64 dt;

  /* Show events in VLIB time since log clock starts after VLIB clock. */
  dt = (em->init_time.cpu - vm->clib_time.init_cpu_time)
    * vm->clib_time.seconds_per_clock;

  es = elog_peek_events (em);
  vlib_cli_output (vm, "%d of %d events in buffer, logger %s", vec_len (es),
                   em->event_ring_size,
                   em->n_total_events < em->n_total_events_disable_limit ?
                   "running" : "stopped");
  vec_foreach (e, es)
    {
      vlib_cli_output (vm, "%18.9f: %U",
                       e->time + dt, format_elog_event, em, e);
      n_events_to_show--;
      if (n_events_to_show == 0)
        break;
    }
  vec_free (es);
}

static clib_error_t *
elog_show_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 n_events_to_show;
  clib_error_t *error = 0;

  n_events_to_show = 250;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%d", &n_events_to_show))
        ;
      else if (unformat (input, "all"))
        n_events_to_show = ~0;
      else
        return unformat_parse_error (input);
    }
  elog_show_buffer_internal (vm, n_events_to_show);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_show_cli, static) = {
  .path = "show event-logger",
  .short_help = "Show event logger info",
  .function = elog_show_buffer,
};
/* *INDENT-ON* */

void
vlib_gdb_show_event_log (void)
{
  elog_show_buffer_internal (vlib_get_main (), (u32) ~ 0);
}
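/*
 * This wrapper exists so the event log can be dumped from a debugger
 * without constructing arguments, e.g. (illustrative):
 *
 *   (gdb) call vlib_gdb_show_event_log ()
 */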

static inline void
vlib_elog_main_loop_event (vlib_main_t * vm,
                           u32 node_index,
                           u64 time, u32 n_vectors, u32 is_return)
{
  vlib_main_t *evm = vlib_get_first_main ();
  elog_main_t *em = vlib_get_elog_main ();
  int enabled = evm->elog_trace_graph_dispatch |
    evm->elog_trace_graph_circuit;

  if (PREDICT_FALSE (enabled && n_vectors))
    {
      if (PREDICT_FALSE (!elog_is_enabled (em)))
        {
          evm->elog_trace_graph_dispatch = 0;
          evm->elog_trace_graph_circuit = 0;
          return;
        }
      if (PREDICT_TRUE
          (evm->elog_trace_graph_dispatch ||
           (evm->elog_trace_graph_circuit &&
            node_index == evm->elog_trace_graph_circuit_node_index)))
        {
          elog_track (em,
                      /* event type */
                      vec_elt_at_index (is_return
                                        ? evm->node_return_elog_event_types
                                        : evm->node_call_elog_event_types,
                                        node_index),
                      /* track */
                      (vm->thread_index ?
                       &vlib_worker_threads[vm->thread_index].elog_track
                       : &em->default_track),
                      /* data to log */ n_vectors);
        }
    }
}

static inline void
add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
{
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
  if (PREDICT_FALSE (b->trajectory_nb >= VLIB_BUFFER_TRACE_TRAJECTORY_MAX))
    return;
  b->trajectory_trace[b->trajectory_nb] = node_index;
  b->trajectory_nb++;
#endif
}
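/*
 * Note (editorial): trajectory recording is compiled in only when
 * VLIB_BUFFER_TRACE_TRAJECTORY is defined to a non-zero value at build time;
 * otherwise this helper is a no-op and the per-buffer recording branch in
 * dispatch_node below is compiled out.
 */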

static_always_inline u64
dispatch_node (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_node_type_t type,
               vlib_node_state_t dispatch_state,
               vlib_frame_t * frame, u64 last_time_stamp)
{
  uword n, v;
  u64 t;
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;

  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *n = vlib_get_node (vm, node->node_index);
      ASSERT (n->type == type);
    }

  /* Only non-internal nodes may be disabled. */
  if (type != VLIB_NODE_TYPE_INTERNAL && node->state != dispatch_state)
    {
      ASSERT (type != VLIB_NODE_TYPE_INTERNAL);
      return last_time_stamp;
    }

  if ((type == VLIB_NODE_TYPE_PRE_INPUT || type == VLIB_NODE_TYPE_INPUT)
      && dispatch_state != VLIB_NODE_STATE_INTERRUPT)
    {
      u32 c = node->input_main_loops_per_call;
      /* Only call node when count reaches zero. */
      if (c)
        {
          node->input_main_loops_per_call = c - 1;
          return last_time_stamp;
        }
    }

  /* Speculatively prefetch next frames. */
  if (node->n_next_nodes > 0)
    {
      nf = vec_elt_at_index (nm->next_frames, node->next_frame_index);
      CLIB_PREFETCH (nf, 4 * sizeof (nf[0]), WRITE);
    }

  vm->cpu_time_last_node_dispatch = last_time_stamp;

  vlib_elog_main_loop_event (vm, node->node_index,
                             last_time_stamp, frame ? frame->n_vectors : 0,
                             /* is_after */ 0);

  vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
                                  VLIB_NODE_RUNTIME_PERF_BEFORE);

  /*
   * Turn this on if you run into
   * "bad monkey" contexts, and you want to know exactly
   * which nodes they've visited... See ixge.c...
   */
  if (VLIB_BUFFER_TRACE_TRAJECTORY && frame)
    {
      int i;
      u32 *from;
      from = vlib_frame_vector_args (frame);
      for (i = 0; i < frame->n_vectors; i++)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
          add_trajectory_trace (b, node->node_index);
        }
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }
  else
    {
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }

  t = clib_cpu_time_now ();

  vlib_node_runtime_perf_counter (vm, node, frame, n, t,
                                  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_elog_main_loop_event (vm, node->node_index, t, n, 1 /* is_after */ );

  vm->main_loop_vectors_processed += n;
  vm->main_loop_nodes_processed += n > 0;

  v = vlib_node_runtime_update_stats (vm, node,
                                      /* n_calls */ 1,
                                      /* n_vectors */ n,
                                      /* n_clocks */ t - last_time_stamp);

  /* When in adaptive mode and vector rate crosses threshold switch to
     polling mode and vice versa. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE))
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (e) =
        {
          .function = (char *) __FUNCTION__,
          .format = "%s vector length %d, switching to %s",
          .format_args = "T4i4t4",
          .n_enum_strings = 2,
          .enum_strings = {
            "interrupt", "polling",
          },
        };
      /* *INDENT-ON* */
      struct
      {
        u32 node_name, vector_length, is_polling;
      } *ed;

      if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
           && v >= nm->polling_threshold_vector_length) &&
          !(node->flags &
            VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          n->state = VLIB_NODE_STATE_POLLING;
          node->state = VLIB_NODE_STATE_POLLING;
          node->flags &=
            ~VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
          node->flags |= VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] -= 1;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] += 1;

          if (PREDICT_FALSE (
                vlib_get_first_main ()->elog_trace_graph_dispatch))
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;

              ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                    w->elog_track);
              ed->node_name = n->name_elog_string;
              ed->vector_length = v;
              ed->is_polling = 1;
            }
        }
      else if (dispatch_state == VLIB_NODE_STATE_POLLING
               && v <= nm->interrupt_threshold_vector_length)
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          if (node->flags &
              VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
            {
              /* Switch to interrupt mode after dispatch in polling one more
                 time.  This allows driver to re-enable interrupts. */
              n->state = VLIB_NODE_STATE_INTERRUPT;
              node->state = VLIB_NODE_STATE_INTERRUPT;
              node->flags &=
                ~VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] -= 1;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] += 1;
            }
          else
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;
              node->flags |=
                VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
              if (PREDICT_FALSE (
                    vlib_get_first_main ()->elog_trace_graph_dispatch))
                {
                  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                        w->elog_track);
                  ed->node_name = n->name_elog_string;
                  ed->vector_length = v;
                  ed->is_polling = 0;
                }
            }
        }
    }

  return t;
}
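/*
 * Adaptive-mode example (editorial note; the threshold values are
 * illustrative, not taken from this file): with
 * polling_threshold_vector_length = 10 and
 * interrupt_threshold_vector_length = 1, an interrupt-mode input node whose
 * dispatch returns 10 or more vectors is moved to polling mode above, and a
 * polling-mode node that drops to one vector or fewer is dispatched once
 * more in polling mode and then returned to interrupt mode so the driver can
 * re-enable its interrupt.
 */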

static u64
dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
                       u64 last_time_stamp)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_t *f;
  vlib_next_frame_t *nf, nf_placeholder;
  vlib_node_runtime_t *n;
  vlib_frame_t *restore_frame;
  vlib_pending_frame_t *p;

  /* See comment below about dangling references to nm->pending_frames */
  p = nm->pending_frames + pending_frame_index;

  n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                        p->node_runtime_index);

  f = vlib_get_frame (vm, p->frame);
  if (p->next_frame_index == VLIB_PENDING_FRAME_NO_NEXT_FRAME)
    {
      /* No next frame: so use placeholder on stack. */
      nf = &nf_placeholder;
      nf->flags = f->frame_flags & VLIB_NODE_FLAG_TRACE;
      nf->frame = NULL;
    }
  else
    nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* Force allocation of new frame while current frame is being
     dispatched. */
  restore_frame = NULL;
  if (nf->frame == p->frame)
    {
      nf->frame = NULL;
      nf->flags &= ~VLIB_FRAME_IS_ALLOCATED;
      if (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH))
        restore_frame = p->frame;
    }

  /* Frame must be pending. */
  ASSERT (f->frame_flags & VLIB_FRAME_PENDING);
  ASSERT (f->n_vectors > 0);

  /* Copy trace flag from next frame to node.
     Trace flag indicates that at least one vector in the dispatched
     frame is traced. */
  n->flags &= ~VLIB_NODE_FLAG_TRACE;
  n->flags |= (nf->flags & VLIB_FRAME_TRACE) ? VLIB_NODE_FLAG_TRACE : 0;
  nf->flags &= ~VLIB_FRAME_TRACE;

  last_time_stamp = dispatch_node (vm, n,
                                   VLIB_NODE_TYPE_INTERNAL,
                                   VLIB_NODE_STATE_POLLING,
                                   f, last_time_stamp);
  /* Internal node vector-rate accounting, for summary stats */
  vm->internal_node_vectors += f->n_vectors;
  vm->internal_node_calls++;
  vm->internal_node_last_vectors_per_main_loop =
    (f->n_vectors > vm->internal_node_last_vectors_per_main_loop) ?
    f->n_vectors : vm->internal_node_last_vectors_per_main_loop;

  f->frame_flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_NO_APPEND);

  /* Frame is ready to be used again, so restore it. */
  if (restore_frame != NULL)
    {
      /*
       * We mustn't restore a frame that is flagged to be freed. This
       * shouldn't happen since frames to be freed post dispatch are
       * those used when the to-node frame becomes full i.e. they form a
       * sort of queue of frames to a single node. If we get here then
       * the to-node frame and the pending frame *were* the same, and so
       * we removed the to-node frame. Therefore this frame is no
       * longer part of the queue for that node and hence it cannot be
       * its overspill.
       */
      ASSERT (!(f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH));

      /*
       * NB: dispatching node n can result in the creation and scheduling
       * of new frames, and hence in the reallocation of nm->pending_frames.
       * Recompute p, or no supper. This was broken for more than 10 years.
       */
      p = nm->pending_frames + pending_frame_index;

      /*
       * p->next_frame_index can change during node dispatch if node
       * function decides to change graph hook up.
       */
      nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;

      if (NULL == nf->frame)
        {
          /* No new frame has been assigned to this node, use the saved one */
          nf->frame = restore_frame;
          f->n_vectors = 0;
        }
      else
        {
          /* The node has gained a frame, implying packets from the current
             frame were re-queued to this same node.  We don't need the saved
             one anymore. */
          vlib_frame_free (vm, n, f);
        }
    }
  else
    {
      if (f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH)
        {
          ASSERT (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH));
          vlib_frame_free (vm, n, f);
        }
    }

  return last_time_stamp;
}

always_inline uword
vlib_process_stack_is_valid (vlib_process_t * p)
{
  return p->stack[0] == VLIB_PROCESS_STACK_MAGIC;
}

typedef struct
{
  vlib_main_t *vm;
  vlib_process_t *process;
  vlib_frame_t *frame;
} vlib_process_bootstrap_args_t;

/* Called in process stack. */
static uword
vlib_process_bootstrap (uword _a)
{
  vlib_process_bootstrap_args_t *a;
  vlib_main_t *vm;
  vlib_node_runtime_t *node;
  vlib_frame_t *f;
  vlib_process_t *p;
  uword n;

  a = uword_to_pointer (_a, vlib_process_bootstrap_args_t *);

  vm = a->vm;
  p = a->process;
  vlib_process_finish_switch_stack (vm);

  f = a->frame;
  node = &p->node_runtime;

  n = node->function (vm, node, f);

  ASSERT (vlib_process_stack_is_valid (p));

  vlib_process_start_switch_stack (vm, 0);
  clib_longjmp (&p->return_longjmp, n);

  return n;
}

/* Called in main stack. */
static_always_inline uword
vlib_process_startup (vlib_main_t * vm, vlib_process_t * p, vlib_frame_t * f)
{
  vlib_process_bootstrap_args_t a;
  uword r;

  a.vm = vm;
  a.process = p;
  a.frame = f;

  r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
  if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
    {
      vlib_process_start_switch_stack (vm, p);
      r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
                        (void *) p->stack + (1 << p->log2_n_stack_bytes));
    }
  else
    vlib_process_finish_switch_stack (vm);

  return r;
}

static_always_inline uword
vlib_process_resume (vlib_main_t * vm, vlib_process_t * p)
{
  uword r;
  p->flags &= ~(VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
                | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
                | VLIB_PROCESS_RESUME_PENDING);
  r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
  if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
    {
      vlib_process_start_switch_stack (vm, p);
      clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
    }
  else
    vlib_process_finish_switch_stack (vm);
  return r;
}
1286
1287static u64
1288dispatch_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001289 vlib_process_t * p, vlib_frame_t * f, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001290{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001291 vlib_node_main_t *nm = &vm->node_main;
1292 vlib_node_runtime_t *node_runtime = &p->node_runtime;
1293 vlib_node_t *node = vlib_get_node (vm, node_runtime->node_index);
Florin Corasfd542f12018-05-16 19:28:24 -07001294 u32 old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001295 u64 t;
1296 uword n_vectors, is_suspend;
1297
1298 if (node->state != VLIB_NODE_STATE_POLLING
1299 || (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1300 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT)))
1301 return last_time_stamp;
1302
1303 p->flags |= VLIB_PROCESS_IS_RUNNING;
1304
1305 t = last_time_stamp;
1306 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1307 f ? f->n_vectors : 0, /* is_after */ 0);
1308
1309 /* Save away current process for suspend. */
Florin Corasfd542f12018-05-16 19:28:24 -07001310 old_process_index = nm->current_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001311 nm->current_process_index = node->runtime_index;
1312
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001313 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1314 VLIB_NODE_RUNTIME_PERF_BEFORE);
1315
Ed Warnickecb9cada2015-12-08 15:45:58 -07001316 n_vectors = vlib_process_startup (vm, p, f);
1317
Florin Corasfd542f12018-05-16 19:28:24 -07001318 nm->current_process_index = old_process_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001319
1320 ASSERT (n_vectors != VLIB_PROCESS_RETURN_LONGJMP_RETURN);
1321 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1322 if (is_suspend)
1323 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001324 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001325
1326 n_vectors = 0;
1327 pool_get (nm->suspended_process_frames, pf);
1328 pf->node_runtime_index = node->runtime_index;
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001329 pf->frame = f;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001330 pf->next_frame_index = ~0;
1331
1332 p->n_suspends += 1;
1333 p->suspended_process_frame_index = pf - nm->suspended_process_frames;
1334
1335 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001336 {
1337 TWT (tw_timer_wheel) * tw =
1338 (TWT (tw_timer_wheel) *) nm->timing_wheel;
1339 p->stop_timer_handle =
1340 TW (tw_timer_start) (tw,
1341 vlib_timing_wheel_data_set_suspended_process
1342 (node->runtime_index) /* [sic] pool index */ ,
1343 0 /* timer_id */ ,
1344 p->resume_clock_interval);
1345 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001346 }
1347 else
1348 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
1349
1350 t = clib_cpu_time_now ();
1351
Dave Barach9b8ffd92016-07-08 08:13:45 -04001352 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, is_suspend,
1353 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001354
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001355 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1356 VLIB_NODE_RUNTIME_PERF_AFTER);
1357
Ed Warnickecb9cada2015-12-08 15:45:58 -07001358 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001359 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001360 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001361 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001362
1363 return t;
1364}
1365
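/*
 * A minimal sketch, not part of the original file, of the kind of process
 * node the machinery above runs: vlib_process_startup () gives it its first
 * quantum, it parks itself via vlib_process_wait_for_event_or_clock (), and
 * dispatch_suspended_process () resumes it later. The node name, timeout and
 * event handling below are illustrative assumptions only.
 */
static uword
example_process_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
		    vlib_frame_t * f)
{
  uword *event_data = 0;

  while (1)
    {
      /* Suspend for up to 10 seconds, or until an event is signaled. */
      vlib_process_wait_for_event_or_clock (vm, 10.0);
      (void) vlib_process_get_events (vm, &event_data);
      /* ... act on the received events here ... */
      vec_reset_length (event_data);
    }
  return 0;
}

VLIB_REGISTER_NODE (example_process_node, static) = {
  .function = example_process_fn,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "example-process",
};
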
Dave Barach9b8ffd92016-07-08 08:13:45 -04001366void
1367vlib_start_process (vlib_main_t * vm, uword process_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001368{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001369 vlib_node_main_t *nm = &vm->node_main;
1370 vlib_process_t *p = vec_elt (nm->processes, process_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001371 dispatch_process (vm, p, /* frame */ 0, /* cpu_time_now */ 0);
1372}
1373
1374static u64
1375dispatch_suspended_process (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001376 uword process_index, u64 last_time_stamp)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001377{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001378 vlib_node_main_t *nm = &vm->node_main;
1379 vlib_node_runtime_t *node_runtime;
1380 vlib_node_t *node;
1381 vlib_frame_t *f;
1382 vlib_process_t *p;
1383 vlib_pending_frame_t *pf;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001384 u64 t, n_vectors, is_suspend;
Dave Barach9b8ffd92016-07-08 08:13:45 -04001385
Ed Warnickecb9cada2015-12-08 15:45:58 -07001386 t = last_time_stamp;
1387
1388 p = vec_elt (nm->processes, process_index);
Dave Barach9b8ffd92016-07-08 08:13:45 -04001389 if (PREDICT_FALSE (!(p->flags & VLIB_PROCESS_IS_RUNNING)))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001390 return last_time_stamp;
1391
1392 ASSERT (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
1393 | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT));
1394
Florin Coras221d6f12018-11-07 20:46:38 -08001395 pf = pool_elt_at_index (nm->suspended_process_frames,
1396 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001397
1398 node_runtime = &p->node_runtime;
1399 node = vlib_get_node (vm, node_runtime->node_index);
Andreas Schultz58b2eb12019-07-15 15:40:56 +02001400 f = pf->frame;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001401
Dave Barach9b8ffd92016-07-08 08:13:45 -04001402 vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
1403 f ? f->n_vectors : 0, /* is_after */ 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001404
1405 /* Save away current process for suspend. */
1406 nm->current_process_index = node->runtime_index;
1407
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001408 vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
1409 VLIB_NODE_RUNTIME_PERF_BEFORE);
1410
Damjan Marioncea46522020-05-21 16:47:05 +02001411 n_vectors = vlib_process_resume (vm, p);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001412 t = clib_cpu_time_now ();
1413
1414 nm->current_process_index = ~0;
1415
1416 is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
1417 if (is_suspend)
1418 {
1419 /* Suspend it again. */
1420 n_vectors = 0;
1421 p->n_suspends += 1;
1422 if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
Dave Barach5c20a012017-06-13 08:48:31 -04001423 {
1424 p->stop_timer_handle =
1425 TW (tw_timer_start) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
1426 vlib_timing_wheel_data_set_suspended_process
1427 (node->runtime_index) /* [sic] pool index */ ,
1428 0 /* timer_id */ ,
1429 p->resume_clock_interval);
1430 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001431 }
1432 else
1433 {
1434 p->flags &= ~VLIB_PROCESS_IS_RUNNING;
Florin Coras221d6f12018-11-07 20:46:38 -08001435 pool_put_index (nm->suspended_process_frames,
1436 p->suspended_process_frame_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001437 p->suspended_process_frame_index = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001438 }
1439
1440 t = clib_cpu_time_now ();
Dave Barach9b8ffd92016-07-08 08:13:45 -04001441 vlib_elog_main_loop_event (vm, node_runtime->node_index, t, !is_suspend,
1442 /* is_after */ 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001443
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001444 vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
1445 VLIB_NODE_RUNTIME_PERF_AFTER);
1446
Ed Warnickecb9cada2015-12-08 15:45:58 -07001447 vlib_process_update_stats (vm, p,
Dave Barach9b8ffd92016-07-08 08:13:45 -04001448 /* n_calls */ !is_suspend,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001449 /* n_vectors */ n_vectors,
Dave Barachec595ef2019-01-24 10:34:24 -05001450 /* n_clocks */ t - last_time_stamp);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001451
1452 return t;
1453}
1454
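/*
 * The other half of the protocol handled by dispatch_suspended_process ():
 * another node, a CLI handler or an API handler wakes a parked process by
 * signaling an event, after which the main loop resumes it via
 * dispatch_suspended_process () above. A hedged sketch, with a hypothetical
 * node index and event type:
 *
 *   vlib_process_signal_event (vm, example_process_node_index,
 *                              EXAMPLE_EVENT_WAKEUP, 0);
 */
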
Dave Barach2877eee2017-12-15 12:22:57 -05001455void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
1456void
1457vl_api_send_pending_rpc_requests (vlib_main_t * vm)
1458{
1459}
1460
Damjan Marione9d52d52017-03-09 15:42:26 +01001461static_always_inline void
1462vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001463{
Dave Barach9b8ffd92016-07-08 08:13:45 -04001464 vlib_node_main_t *nm = &vm->node_main;
Damjan Marione9d52d52017-03-09 15:42:26 +01001465 vlib_thread_main_t *tm = vlib_get_thread_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001466 uword i;
1467 u64 cpu_time_now;
Dave Barach000a0292020-02-17 17:07:12 -05001468 f64 now;
Damjan Marione9d52d52017-03-09 15:42:26 +01001469 vlib_frame_queue_main_t *fqm;
Dave Barach80965f52019-03-11 09:57:38 -04001470 u32 frame_queue_check_counter = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001471
1472 /* Initialize pending node vector. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001473 if (is_main)
1474 {
1475 vec_resize (nm->pending_frames, 32);
1476 _vec_len (nm->pending_frames) = 0;
1477 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001478
1479 /* Mark time of main loop start. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001480 if (is_main)
1481 {
1482 cpu_time_now = vm->clib_time.last_cpu_time;
1483 vm->cpu_time_main_loop_start = cpu_time_now;
1484 }
1485 else
1486 cpu_time_now = clib_cpu_time_now ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001487
Damjan Marion2c2b6402017-03-28 14:16:15 +02001488 /* Pre-allocate interrupt runtime indices and lock. */
Damjan Marion94100532020-11-06 23:25:57 +01001489 vec_alloc_aligned (nm->pending_interrupts, 1, CLIB_CACHE_LINE_BYTES);
Damjan Marion2c2b6402017-03-28 14:16:15 +02001490
1491 /* Pre-allocate expired nodes. */
Steven7312cc72017-03-15 21:18:55 -07001492 if (!nm->polling_threshold_vector_length)
1493 nm->polling_threshold_vector_length = 10;
1494 if (!nm->interrupt_threshold_vector_length)
1495 nm->interrupt_threshold_vector_length = 5;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001496
Damjan Marion29c0b332019-01-28 13:41:27 +01001497 vm->cpu_id = clib_get_current_cpu_id ();
1498 vm->numa_node = clib_get_current_numa_node ();
Florin Coras4c959952020-02-09 18:09:31 +00001499 os_set_numa_index (vm->numa_node);
Damjan Marion29c0b332019-01-28 13:41:27 +01001500
Ed Warnickecb9cada2015-12-08 15:45:58 -07001501 /* Start all processes. */
Damjan Marione9d52d52017-03-09 15:42:26 +01001502 if (is_main)
1503 {
1504 uword i;
Dave Barachc602b382019-06-03 19:48:22 -04001505
1506 /*
1507 * Perform an initial barrier sync. Pays no attention to
1508 * the barrier sync hold-down timer scheme, which won't work
1509 * at this point in time.
1510 */
1511 vlib_worker_thread_initial_barrier_sync_and_release (vm);
1512
Stevenf3b53642017-05-01 14:03:02 -07001513 nm->current_process_index = ~0;
Damjan Marione9d52d52017-03-09 15:42:26 +01001514 for (i = 0; i < vec_len (nm->processes); i++)
1515 cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
1516 cpu_time_now);
1517 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001518
1519 while (1)
1520 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04001521 vlib_node_runtime_t *n;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001522
Dave Barach2877eee2017-12-15 12:22:57 -05001523 if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
Dave Barachf6c68d72018-11-01 08:12:52 -04001524 {
1525 if (!is_main)
1526 vl_api_send_pending_rpc_requests (vm);
1527 }
Dave Barach2877eee2017-12-15 12:22:57 -05001528
Damjan Marione9d52d52017-03-09 15:42:26 +01001529 if (!is_main)
Damjan Marionf6e6c782020-09-17 09:54:07 +02001530 vlib_worker_thread_barrier_check ();
1531
1532 if (PREDICT_FALSE (vm->check_frame_queues + frame_queue_check_counter))
Damjan Marione9d52d52017-03-09 15:42:26 +01001533 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001534 u32 processed = 0;
Damjan Marioneee099e2021-05-01 14:56:13 +02001535 vlib_frame_queue_dequeue_fn_t *fn =
1536 vlib_buffer_func_main.frame_queue_dequeue_fn;
Damjan Marionf6e6c782020-09-17 09:54:07 +02001537
1538 if (vm->check_frame_queues)
Dave Barach80965f52019-03-11 09:57:38 -04001539 {
Damjan Marionf6e6c782020-09-17 09:54:07 +02001540 frame_queue_check_counter = 100;
1541 vm->check_frame_queues = 0;
Dave Barach80965f52019-03-11 09:57:38 -04001542 }
Damjan Marionf6e6c782020-09-17 09:54:07 +02001543
1544 vec_foreach (fqm, tm->frame_queue_mains)
Damjan Marioneee099e2021-05-01 14:56:13 +02001545 processed += (fn) (vm, fqm);
Damjan Marionf6e6c782020-09-17 09:54:07 +02001546
1547 /* No handoff queue work found? */
1548 if (processed)
1549 frame_queue_check_counter = 100;
1550 else
1551 frame_queue_check_counter--;
Damjan Marione9d52d52017-03-09 15:42:26 +01001552 }
1553
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001554 if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
1555 clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm,
1556 cpu_time_now);
1557
Ed Warnickecb9cada2015-12-08 15:45:58 -07001558 /* Process pre-input nodes. */
Tom Seidenberg6c81f5a2020-07-10 15:49:03 +00001559 cpu_time_now = clib_cpu_time_now ();
Damjan Marionceab7882018-01-19 20:56:12 +01001560 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
1561 cpu_time_now = dispatch_node (vm, n,
1562 VLIB_NODE_TYPE_PRE_INPUT,
1563 VLIB_NODE_STATE_POLLING,
1564 /* frame */ 0,
1565 cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001566
1567 /* Next process input nodes. */
1568 vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1569 cpu_time_now = dispatch_node (vm, n,
1570 VLIB_NODE_TYPE_INPUT,
1571 VLIB_NODE_STATE_POLLING,
1572 /* frame */ 0,
1573 cpu_time_now);
1574
Damjan Marione9d52d52017-03-09 15:42:26 +01001575 if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
Dave Barach9b8ffd92016-07-08 08:13:45 -04001576 vm->queue_signal_callback (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001577
Damjan Marion94100532020-11-06 23:25:57 +01001578 if (__atomic_load_n (nm->pending_interrupts, __ATOMIC_ACQUIRE))
Damjan Marion0b316302020-09-09 18:55:16 +02001579 {
Damjan Marion94100532020-11-06 23:25:57 +01001580 int int_num = -1;
1581 *nm->pending_interrupts = 0;
Dave Barachd47c5092018-01-19 13:09:20 -05001582
Damjan Marion94100532020-11-06 23:25:57 +01001583 while ((int_num =
1584 clib_interrupt_get_next (nm->interrupts, int_num)) != -1)
1585 {
1586 vlib_node_runtime_t *n;
1587 clib_interrupt_clear (nm->interrupts, int_num);
1588 n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1589 int_num);
1590 cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
1591 VLIB_NODE_STATE_INTERRUPT,
1592 /* frame */ 0, cpu_time_now);
1593 }
Damjan Marion1033b492020-06-03 12:20:41 +02001594 }
1595
Dave Barache3248982018-08-14 13:47:58 -04001596 /* Input nodes may have added work to the pending vector.
1597 Process pending vector until there is nothing left.
1598 All pending vectors will be processed from input -> output. */
1599 for (i = 0; i < _vec_len (nm->pending_frames); i++)
1600 cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
1601 /* Reset pending vector for next iteration. */
1602 _vec_len (nm->pending_frames) = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001603
Damjan Marione9d52d52017-03-09 15:42:26 +01001604 if (is_main)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001605 {
Dave Barach900cbad2019-01-31 19:12:51 -05001606 /* *INDENT-OFF* */
1607 ELOG_TYPE_DECLARE (es) =
1608 {
1609 .format = "process tw start",
1610 .format_args = "",
1611 };
1612 ELOG_TYPE_DECLARE (ee) =
1613 {
1614 .format = "process tw end: %d",
1615 .format_args = "i4",
1616 };
1617 /* *INDENT-ON* */
1618
1619 struct
1620 {
1621 int nready_procs;
1622 } *ed;
1623
Damjan Marione9d52d52017-03-09 15:42:26 +01001624 /* Check if process nodes have expired from timing wheel. */
Dave Barach5c20a012017-06-13 08:48:31 -04001625 ASSERT (nm->data_from_advancing_timing_wheel != 0);
1626
Dave Barach900cbad2019-01-31 19:12:51 -05001627 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1628 ed = ELOG_DATA (&vlib_global_main.elog_main, es);
1629
Dave Barach5c20a012017-06-13 08:48:31 -04001630 nm->data_from_advancing_timing_wheel =
1631 TW (tw_timer_expire_timers_vec)
1632 ((TWT (tw_timer_wheel) *) nm->timing_wheel, vlib_time_now (vm),
1633 nm->data_from_advancing_timing_wheel);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001634
Damjan Marione9d52d52017-03-09 15:42:26 +01001635 ASSERT (nm->data_from_advancing_timing_wheel != 0);
Dave Barach5c20a012017-06-13 08:48:31 -04001636
Dave Barach900cbad2019-01-31 19:12:51 -05001637 if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
1638 {
1639 ed = ELOG_DATA (&vlib_global_main.elog_main, ee);
1640 ed->nready_procs =
1641 _vec_len (nm->data_from_advancing_timing_wheel);
1642 }
1643
Damjan Marione9d52d52017-03-09 15:42:26 +01001644 if (PREDICT_FALSE
1645 (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001646 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001647 uword i;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001648
Damjan Marione9d52d52017-03-09 15:42:26 +01001649 for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
1650 i++)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001651 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001652 u32 d = nm->data_from_advancing_timing_wheel[i];
1653 u32 di = vlib_timing_wheel_data_get_index (d);
1654
1655 if (vlib_timing_wheel_data_is_timed_event (d))
1656 {
1657 vlib_signal_timed_event_data_t *te =
1658 pool_elt_at_index (nm->signal_timed_event_data_pool,
1659 di);
1660 vlib_node_t *n =
1661 vlib_get_node (vm, te->process_node_index);
1662 vlib_process_t *p =
1663 vec_elt (nm->processes, n->runtime_index);
1664 void *data;
1665 data =
1666 vlib_process_signal_event_helper (nm, n, p,
1667 te->event_type_index,
1668 te->n_data_elts,
1669 te->n_data_elt_bytes);
1670 if (te->n_data_bytes < sizeof (te->inline_event_data))
Dave Barach178cf492018-11-13 16:34:13 -05001671 clib_memcpy_fast (data, te->inline_event_data,
1672 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001673 else
1674 {
Dave Barach178cf492018-11-13 16:34:13 -05001675 clib_memcpy_fast (data, te->event_data_as_vector,
1676 te->n_data_bytes);
Damjan Marione9d52d52017-03-09 15:42:26 +01001677 vec_free (te->event_data_as_vector);
1678 }
1679 pool_put (nm->signal_timed_event_data_pool, te);
1680 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001681 else
1682 {
Damjan Marione9d52d52017-03-09 15:42:26 +01001683 cpu_time_now = clib_cpu_time_now ();
1684 cpu_time_now =
1685 dispatch_suspended_process (vm, di, cpu_time_now);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001686 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001687 }
Damjan Marione9d52d52017-03-09 15:42:26 +01001688 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
1689 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001690 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001691 vlib_increment_main_loop_counter (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001692 /* Record time stamp in case there are no enabled nodes and above
Dave Barach9b8ffd92016-07-08 08:13:45 -04001693 calls do not update time stamp. */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001694 cpu_time_now = clib_cpu_time_now ();
Dave Barach000a0292020-02-17 17:07:12 -05001695 vm->loops_this_reporting_interval++;
1696 now = clib_time_now_internal (&vm->clib_time, cpu_time_now);
1697 /* Time to update loops_per_second? */
1698 if (PREDICT_FALSE (now >= vm->loop_interval_end))
1699 {
1700 /* Next sample ends in 20ms */
1701 if (vm->loop_interval_start)
1702 {
1703 f64 this_loops_per_second;
1704
1705 this_loops_per_second =
1706 ((f64) vm->loops_this_reporting_interval) / (now -
1707 vm->loop_interval_start);
1708
1709 vm->loops_per_second =
1710 vm->loops_per_second * vm->damping_constant +
1711 (1.0 - vm->damping_constant) * this_loops_per_second;
1712 if (vm->loops_per_second != 0.0)
1713 vm->seconds_per_loop = 1.0 / vm->loops_per_second;
1714 else
1715 vm->seconds_per_loop = 0.0;
1716 }
1717 /* New interval starts now, and ends in 20ms */
1718 vm->loop_interval_start = now;
1719 vm->loop_interval_end = now + 2e-2;
1720 vm->loops_this_reporting_interval = 0;
1721 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001722 }
1723}
Dave Barach9b8ffd92016-07-08 08:13:45 -04001724
Damjan Marione9d52d52017-03-09 15:42:26 +01001725static void
1726vlib_main_loop (vlib_main_t * vm)
1727{
1728 vlib_main_or_worker_loop (vm, /* is_main */ 1);
1729}
1730
1731void
1732vlib_worker_loop (vlib_main_t * vm)
1733{
1734 vlib_main_or_worker_loop (vm, /* is_main */ 0);
1735}
1736
Damjan Marionfd8deb42021-03-06 12:26:28 +01001737vlib_global_main_t vlib_global_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001738
Damjan Marion25ab6c52021-03-05 14:41:25 +01001739void
1740vlib_add_del_post_mortem_callback (void *cb, int is_add)
1741{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001742 vlib_global_main_t *vgm = vlib_get_global_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001743 int i;
1744
1745 if (is_add == 0)
1746 {
Damjan Marionfd8deb42021-03-06 12:26:28 +01001747 for (i = vec_len (vgm->post_mortem_callbacks) - 1; i >= 0; i--)
1748 if (vgm->post_mortem_callbacks[i] == cb)
1749 vec_del1 (vgm->post_mortem_callbacks, i);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001750 return;
1751 }
1752
Damjan Marionfd8deb42021-03-06 12:26:28 +01001753 for (i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
1754 if (vgm->post_mortem_callbacks[i] == cb)
Damjan Marion25ab6c52021-03-05 14:41:25 +01001755 return;
Damjan Marionfd8deb42021-03-06 12:26:28 +01001756 vec_add1 (vgm->post_mortem_callbacks, cb);
Damjan Marion25ab6c52021-03-05 14:41:25 +01001757}
1758
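/*
 * A hedged usage sketch of the registration helper above (the callback name
 * is hypothetical; elog_post_mortem_dump () below is the in-tree user of the
 * same pattern):
 *
 *   static void
 *   example_post_mortem_cb (void)
 *   {
 *     ... dump only state that is still safe to touch while crashing ...
 *   }
 *
 *   vlib_add_del_post_mortem_callback (example_post_mortem_cb, 1);
 */
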
1759static void
1760elog_post_mortem_dump (void)
1761{
Damjan Marionf553a2c2021-03-26 13:45:37 +01001762 elog_main_t *em = vlib_get_elog_main ();
Damjan Marion25ab6c52021-03-05 14:41:25 +01001763
1764 u8 *filename;
1765 clib_error_t *error;
1766
1767 filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
1768 error = elog_write_file (em, (char *) filename, 1 /* flush ring */);
1769 if (error)
1770 clib_error_report (error);
1771 /*
1772 * We're in the middle of crashing. Don't try to free the filename.
1773 */
1774}
1775
Ed Warnickecb9cada2015-12-08 15:45:58 -07001776static clib_error_t *
1777vlib_main_configure (vlib_main_t * vm, unformat_input_t * input)
1778{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001779 vlib_global_main_t *vgm = vlib_get_global_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001780 int turn_on_mem_trace = 0;
1781
1782 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1783 {
1784 if (unformat (input, "memory-trace"))
1785 turn_on_mem_trace = 1;
1786
1787 else if (unformat (input, "elog-events %d",
Damjan Marionfd8deb42021-03-06 12:26:28 +01001788 &vgm->configured_elog_ring_size))
1789 vgm->configured_elog_ring_size =
1790 1 << max_log2 (vgm->configured_elog_ring_size);
Dave Barach81481312017-05-16 09:08:14 -04001791 else if (unformat (input, "elog-post-mortem-dump"))
Damjan Marion25ab6c52021-03-05 14:41:25 +01001792 vlib_add_del_post_mortem_callback (elog_post_mortem_dump,
1793 /* is_add */ 1);
Dave Barachc74b43c2020-04-09 17:24:07 -04001794 else if (unformat (input, "buffer-alloc-success-rate %f",
1795 &vm->buffer_alloc_success_rate))
1796 {
1797 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1798 return clib_error_return
1799 (0, "Buffer fault injection not configured");
1800 }
1801 else if (unformat (input, "buffer-alloc-success-seed %u",
1802 &vm->buffer_alloc_success_seed))
1803 {
1804 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
1805 return clib_error_return
1806 (0, "Buffer fault injection not configured");
1807 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001808 else
1809 return unformat_parse_error (input);
1810 }
1811
1812 unformat_free (input);
1813
1814 /* Enable memory trace as early as possible. */
1815 if (turn_on_mem_trace)
1816 clib_mem_trace (1);
1817
1818 return 0;
1819}
1820
1821VLIB_EARLY_CONFIG_FUNCTION (vlib_main_configure, "vlib");
1822
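/*
 * The early config handler above parses the "vlib" section of startup.conf.
 * A sketch of a matching stanza -- values are illustrative only, and the
 * buffer-alloc-* knobs require a VLIB_BUFFER_ALLOC_FAULT_INJECTOR build:
 *
 *   vlib {
 *     memory-trace
 *     elog-events 131072
 *     elog-post-mortem-dump
 *     buffer-alloc-success-rate 0.9
 *     buffer-alloc-success-seed 42
 *   }
 */
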
Dave Barach9b8ffd92016-07-08 08:13:45 -04001823static void
Dave Barach11fb09e2020-08-06 12:10:09 -04001824placeholder_queue_signal_callback (vlib_main_t * vm)
Dave Barach9b8ffd92016-07-08 08:13:45 -04001825{
1826}
Dave Barach16c75df2016-05-31 14:05:46 -04001827
Dave Barach1f806582018-06-14 09:18:21 -04001828#define foreach_weak_reference_stub \
1829_(vlib_map_stat_segment_init) \
1830_(vpe_api_init) \
1831_(vlibmemory_init) \
1832_(map_api_segment_init)
1833
1834#define _(name) \
1835clib_error_t *name (vlib_main_t *vm) __attribute__((weak)); \
1836clib_error_t *name (vlib_main_t *vm) { return 0; }
1837foreach_weak_reference_stub;
1838#undef _
1839
Dave Barachb09f4d02019-07-15 16:00:03 -04001840void vl_api_set_elog_main (elog_main_t * m) __attribute__ ((weak));
1841void
1842vl_api_set_elog_main (elog_main_t * m)
1843{
1844 clib_warning ("STUB");
1845}
1846
1847int vl_api_set_elog_trace_api_messages (int enable) __attribute__ ((weak));
1848int
1849vl_api_set_elog_trace_api_messages (int enable)
1850{
1851 clib_warning ("STUB");
1852 return 0;
1853}
1854
1855int vl_api_get_elog_trace_api_messages (void) __attribute__ ((weak));
1856int
1857vl_api_get_elog_trace_api_messages (void)
1858{
1859 clib_warning ("STUB");
1860 return 0;
1861}
1862
Ed Warnickecb9cada2015-12-08 15:45:58 -07001863/* Main function. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001864int
Eyal Barid334a6b2016-09-19 10:23:39 +03001865vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001866{
Damjan Marionfd8deb42021-03-06 12:26:28 +01001867 vlib_global_main_t *vgm = vlib_get_global_main ();
Eyal Barid334a6b2016-09-19 10:23:39 +03001868 clib_error_t *volatile error;
Dave Barach5c20a012017-06-13 08:48:31 -04001869 vlib_node_main_t *nm = &vm->node_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001870
Dave Barach11fb09e2020-08-06 12:10:09 -04001871 vm->queue_signal_callback = placeholder_queue_signal_callback;
Dave Barach16c75df2016-05-31 14:05:46 -04001872
Dave Barachbc867c32020-11-25 10:07:09 -05001873 /* Reconfigure event log which is enabled very early */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001874 if (vgm->configured_elog_ring_size &&
1875 vgm->configured_elog_ring_size != vgm->elog_main.event_ring_size)
1876 elog_resize (&vgm->elog_main, vgm->configured_elog_ring_size);
Damjan Marionf553a2c2021-03-26 13:45:37 +01001877 vl_api_set_elog_main (vlib_get_elog_main ());
Dave Barachb09f4d02019-07-15 16:00:03 -04001878 (void) vl_api_set_elog_trace_api_messages (1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001879
1880 /* Default name. */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001881 if (!vgm->name)
1882 vgm->name = "VLIB";
Ed Warnickecb9cada2015-12-08 15:45:58 -07001883
Damjan Marion68b4da62018-09-30 18:26:20 +02001884 if ((error = vlib_physmem_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001885 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001886 clib_error_report (error);
1887 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001888 }
Damjan Marion49d66f12017-07-20 18:10:35 +02001889
Filip Tehlard2bbdef2019-02-22 05:05:53 -08001890 if ((error = vlib_map_stat_segment_init (vm)))
1891 {
1892 clib_error_report (error);
1893 goto done;
1894 }
1895
Damjan Marion49d66f12017-07-20 18:10:35 +02001896 if ((error = vlib_buffer_main_init (vm)))
Damjan Marion04a7f052017-07-10 15:06:17 +02001897 {
Damjan Marion49d66f12017-07-20 18:10:35 +02001898 clib_error_report (error);
1899 goto done;
Damjan Marion04a7f052017-07-10 15:06:17 +02001900 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001901
1902 if ((error = vlib_thread_init (vm)))
1903 {
1904 clib_error_report (error);
1905 goto done;
1906 }
1907
Damjan Mariona31698b2021-03-10 14:35:28 +01001908 /* Register per-march node function variants */
1909 vlib_register_all_node_march_variants (vm);
1910
Ed Warnickecb9cada2015-12-08 15:45:58 -07001911 /* Register static nodes so that init functions may use them. */
1912 vlib_register_all_static_nodes (vm);
1913
1914 /* Set seed for random number generator.
1915 Allow user to specify seed to make random sequence deterministic. */
Dave Barach9b8ffd92016-07-08 08:13:45 -04001916 if (!unformat (input, "seed %wd", &vm->random_seed))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001917 vm->random_seed = clib_cpu_time_now ();
1918 clib_random_buffer_init (&vm->random_buffer, vm->random_seed);
1919
Ed Warnickecb9cada2015-12-08 15:45:58 -07001920 /* Initialize node graph. */
1921 if ((error = vlib_node_main_init (vm)))
1922 {
1923 /* Arrange for graph hook up error to not be fatal when debugging. */
1924 if (CLIB_DEBUG > 0)
1925 clib_error_report (error);
1926 else
1927 goto done;
1928 }
1929
Dave Barach1f806582018-06-14 09:18:21 -04001930 /* Direct call / weak reference, for vlib standalone use-cases */
1931 if ((error = vpe_api_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001932 {
1933 clib_error_report (error);
1934 goto done;
1935 }
1936
Dave Barach1f806582018-06-14 09:18:21 -04001937 if ((error = vlibmemory_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001938 {
1939 clib_error_report (error);
1940 goto done;
1941 }
1942
Dave Barach1f806582018-06-14 09:18:21 -04001943 if ((error = map_api_segment_init (vm)))
Dave Barach048a4e52018-06-01 18:52:25 -04001944 {
1945 clib_error_report (error);
1946 goto done;
1947 }
1948
Ole Troan964f93e2016-06-10 13:22:36 +02001949 /* See unix/main.c; most likely already set up */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001950 if (vgm->init_functions_called == 0)
1951 vgm->init_functions_called = hash_create (0, /* value bytes */ 0);
Ole Troan964f93e2016-06-10 13:22:36 +02001952 if ((error = vlib_call_all_init_functions (vm)))
1953 goto done;
1954
Dave Barach5c20a012017-06-13 08:48:31 -04001955 nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
1956 CLIB_CACHE_LINE_BYTES);
1957
1958 vec_validate (nm->data_from_advancing_timing_wheel, 10);
1959 _vec_len (nm->data_from_advancing_timing_wheel) = 0;
1960
1961 /* Create the process timing wheel */
1962 TW (tw_timer_wheel_init) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
1963 0 /* no callback */ ,
1964 10e-6 /* timer period 10us */ ,
1965 ~0 /* max expirations per call */ );
1966
Dave Barach2877eee2017-12-15 12:22:57 -05001967 vec_validate (vm->pending_rpc_requests, 0);
1968 _vec_len (vm->pending_rpc_requests) = 0;
Dave Barachf6c68d72018-11-01 08:12:52 -04001969 vec_validate (vm->processing_rpc_requests, 0);
1970 _vec_len (vm->processing_rpc_requests) = 0;
Dave Barach2877eee2017-12-15 12:22:57 -05001971
Dave Barachc74b43c2020-04-09 17:24:07 -04001972 /* Default params for the buffer allocator fault injector, if configured */
1973 if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
1974 {
1975 vm->buffer_alloc_success_seed = 0xdeaddabe;
1976 vm->buffer_alloc_success_rate = 0.80;
1977 }
1978
Dave Barachd1e17d02019-03-21 18:01:48 -04001979 if ((error = vlib_call_all_config_functions (vm, input, 0 /* is_early */ )))
1980 goto done;
1981
Dave Barach000a0292020-02-17 17:07:12 -05001982 /*
1983 * Use exponential smoothing, with a half-life of 1 second
1984 * reported_rate(t) = reported_rate(t-1) * K + rate(t)*(1-K)
1985 *
1986 * Sample every 20ms, aka 50 samples per second
1987 * K = exp (-1.0/20.0);
1988 * K = 0.95
1989 */
1990 vm->damping_constant = exp (-1.0 / 20.0);
1991
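/*
 * A worked example of the smoothing above, with illustrative numbers: if the
 * previous estimate is 1e6 loops/s and a fresh sample measures 0.5e6 loops/s,
 * then with K = 0.95
 *
 *   reported = 1e6 * 0.95 + 0.5e6 * (1 - 0.95) = 0.975e6 loops/s
 *
 * i.e. each sample nudges vm->loops_per_second toward the measured rate with
 * weight (1 - K).
 */
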
Dave Barachc602b382019-06-03 19:48:22 -04001992 /* Sort per-thread init functions before we start threads */
Damjan Marionfd8deb42021-03-06 12:26:28 +01001993 vlib_sort_init_exit_functions (&vgm->worker_init_function_registrations);
Dave Barachc602b382019-06-03 19:48:22 -04001994
Dave Barachd1e17d02019-03-21 18:01:48 -04001995 /* Call all main loop enter functions. */
1996 {
1997 clib_error_t *sub_error;
1998 sub_error = vlib_call_all_main_loop_enter_functions (vm);
1999 if (sub_error)
2000 clib_error_report (sub_error);
2001 }
2002
Ed Warnickecb9cada2015-12-08 15:45:58 -07002003 switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
2004 {
2005 case VLIB_MAIN_LOOP_EXIT_NONE:
2006 vm->main_loop_exit_set = 1;
2007 break;
2008
2009 case VLIB_MAIN_LOOP_EXIT_CLI:
2010 goto done;
2011
2012 default:
2013 error = vm->main_loop_error;
2014 goto done;
2015 }
2016
Ed Warnickecb9cada2015-12-08 15:45:58 -07002017 vlib_main_loop (vm);
2018
Dave Barach9b8ffd92016-07-08 08:13:45 -04002019done:
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302020 vlib_worker_thread_barrier_sync (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002021 /* Call all exit functions. */
2022 {
Dave Barach9b8ffd92016-07-08 08:13:45 -04002023 clib_error_t *sub_error;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002024 sub_error = vlib_call_all_main_loop_exit_functions (vm);
2025 if (sub_error)
2026 clib_error_report (sub_error);
2027 }
Kommula Shiva Shankarced43e22021-01-28 13:05:59 +05302028 vlib_worker_thread_barrier_release (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002029
2030 if (error)
2031 clib_error_report (error);
2032
Pierre Pfisterc26cc722021-09-10 16:38:03 +02002033 return vm->main_loop_exit_status;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002034}
Dave Barach9b8ffd92016-07-08 08:13:45 -04002035
Dave Barachab1a50c2020-10-06 14:08:16 -04002036vlib_main_t *
2037vlib_get_main_not_inline (void)
2038{
2039 return vlib_get_main ();
2040}
2041
2042elog_main_t *
2043vlib_get_elog_main_not_inline ()
2044{
2045 return &vlib_global_main.elog_main;
2046}
2047
Pierre Pfisterc26cc722021-09-10 16:38:03 +02002048void
2049vlib_exit_with_status (vlib_main_t *vm, int status)
2050{
2051 vm->main_loop_exit_status = status;
2052 __atomic_store_n (&vm->main_loop_exit_now, 1, __ATOMIC_RELEASE);
2053}
2054
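/*
 * A hedged usage sketch: any component that wants an orderly shutdown with a
 * specific exit code can call the helper above, e.g.
 *
 *   vlib_exit_with_status (vlib_get_main (), 2);
 *
 * The main loop checks main_loop_exit_now, unwinds, and vlib_main () returns
 * vm->main_loop_exit_status to its caller (see the return above).
 */
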
Dave Barach9b8ffd92016-07-08 08:13:45 -04002055/*
2056 * fd.io coding-style-patch-verification: ON
2057 *
2058 * Local Variables:
2059 * eval: (c-set-style "gnu")
2060 * End:
2061 */