/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vlib/stats/stats.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>

#include <vlib/unix/unix.h>

#define VLIB_FRAME_MAGIC (0xabadc0ed)

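/* Return a pointer to the magic number stored at the node-specific
   offset inside a frame; used to catch scalar-argument overruns. */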
always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  return (void *) f + node->magic_offset;
}

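/* Allocate a frame suitable for TO_NODE_INDEX, reusing one from the
   per-size free list when possible.  Debug images poison the frame;
   the node-specific magic number and data offsets are then filled in. */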
static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
                          u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n;

  ASSERT (vm == vlib_get_main ());

  to_node = vlib_get_node (vm, to_node_index);

  vec_validate (nm->frame_sizes, to_node->frame_size_index);
  fs = vec_elt_at_index (nm->frame_sizes, to_node->frame_size_index);

  if (fs->frame_size == 0)
    fs->frame_size = to_node->frame_size;
  else
    ASSERT (fs->frame_size == to_node->frame_size);

  n = fs->frame_size;
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      vec_set_len (fs->free_frames, l - 1);
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, CLIB_CACHE_LINE_BYTES);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset_u8 (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_offset = to_node->scalar_offset;
  f->vector_offset = to_node->vector_offset;
  f->aux_offset = to_node->aux_offset;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns a pointer to the allocated frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
                  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
                                   /* frame_flags */ 0);
}

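/* Public allocator: get an empty frame addressed to TO_NODE_INDEX.
   The frame is flagged VLIB_FRAME_FREE_AFTER_DISPATCH, so it returns
   to the allocator once the target node has consumed it. */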
vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
                                              /* frame_flags */
                                              VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

static inline void
vlib_validate_frame_indices (vlib_frame_t * f)
{
  if (CLIB_DEBUG > 0)
    {
      int i;
      u32 *from = vlib_frame_vector_args (f);

      /* Check for bad buffer index values */
      for (i = 0; i < f->n_vectors; i++)
        {
          if (from[i] == 0)
            {
              clib_warning ("BUG: buffer index 0 at index %d", i);
              ASSERT (0);
            }
          else if (from[i] == 0xfefefefe)
            {
              clib_warning ("BUG: frame poison pattern at index %d", i);
              ASSERT (0);
            }
        }
    }
}

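/* Hand a filled frame to TO_NODE_INDEX by appending it to the pending
   frame vector; empty frames are ignored.  A typical usage sketch
   (illustrative only; buffer allocation and error handling elided,
   target_node_index and buffer_index are caller-supplied):

     vlib_frame_t *f = vlib_get_frame_to_node (vm, target_node_index);
     u32 *to_next = vlib_frame_vector_args (f);
     to_next[0] = buffer_index;
     f->n_vectors = 1;
     vlib_put_frame_to_node (vm, target_node_index, f);
*/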
void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  ASSERT (vm == vlib_get_main ());

  vlib_validate_frame_indices (f);

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (vm == vlib_get_main ());
  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = vec_elt_at_index (nm->frame_sizes, node->frame_size_index);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);
  f->flags = 0;

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=8s%=6s%=12s%=12s", "Thread", "Size", "# Alloc",
                   "# Free");
  foreach_vlib_main ()
    {
      vlib_node_main_t *nm = &this_vlib_main->node_main;
      vec_foreach (fs, nm->frame_sizes)
        {
          u32 n_alloc = fs->n_alloc_frames;
          u32 n_free = vec_len (fs->free_frames);

          if (n_alloc + n_free > 0)
            vlib_cli_output (vm, "%=8d%=6d%=12d%=12d",
                             this_vlib_main->thread_index, fs->frame_size,
                             n_alloc, n_free);
        }
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
                                  vlib_node_runtime_t * node_runtime,
                                  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
          || node->type == VLIB_NODE_TYPE_INPUT
          || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
        vlib_node_get_next_frame (vm,
                                  next_node->owner_node_index,
                                  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
        {
          vlib_pending_frame_t *p;
          if (next_frame->frame != NULL)
            {
              vec_foreach (p, nm->pending_frames)
                {
                  if (p->frame == next_frame->frame)
                    {
                      p->next_frame_index =
                        next_frame - vm->node_main.next_frames;
                    }
                }
            }
        }
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
                      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

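/* Return the frame currently being filled for (node, next_index),
   taking ownership of the next-frame slot if needed.  A fresh frame is
   allocated when none exists, when the current one is full or marked
   no-append, or when allocate_new_next_frame is set and the current
   frame is non-empty. */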
vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
        {
          vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
          f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
        }

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
                            vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

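/* Debug-only checks run before a next frame is handed off: buffer
   index sanity, vector count consistency, and the destination node's
   optional validate_frame callback. */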
static void
vlib_put_next_frame_validate (vlib_main_t * vm,
                              vlib_node_runtime_t * rt,
                              u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);

  vlib_validate_frame_indices (f);

  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                              nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
        {
          clib_warning ("%v", msg);
          ASSERT (0);
        }
      vec_free (msg);
    }
}

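/* Finish enqueueing to (r, next_index): convert the number of slots
   left into the number of vectors used and, if anything was added,
   put the frame on the pending vector for dispatch.  Normally reached
   through the vlib_get_next_frame () / vlib_put_next_frame () pattern
   in a node dispatch function; an illustrative sketch (details such as
   buffer validation and dual/quad loops elided):

     u32 *to_next, n_left_to_next;
     vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
     to_next[0] = bi0;
     to_next += 1;
     n_left_to_next -= 1;
     vlib_put_next_frame (vm, node, next_index, n_left_to_next);
*/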
void
vlib_put_next_frame (vlib_main_t * vm,
                     vlib_node_runtime_t * r,
                     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there.  Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
        {
          __attribute__ ((unused)) vlib_node_t *node;

          node = vlib_get_node (vm, r->node_index);

          vec_add2 (nm->pending_frames, p, 1);

          p->frame = nf->frame;
          p->node_runtime_index = nf->node_runtime_index;
          p->next_frame_index = nf - nm->next_frames;
          nf->flags |= VLIB_FRAME_PENDING;
          f->frame_flags |= VLIB_FRAME_PENDING;
        }

      /* Copy trace flag from next_frame and from runtime. */
      nf->flags |=
        (nf->flags & VLIB_NODE_FLAG_TRACE) | (r->flags & VLIB_NODE_FLAG_TRACE);

      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;
      if (PREDICT_FALSE (v1 < v0))
        {
          vlib_node_t *node = vlib_get_node (vm, r->node_index);
          vec_elt (node->n_vectors_by_next_node, next_index) += v0;
        }
    }
}

/* Sync up runtime (32 bit counters) and main node stats (64 bit counters). */
void
vlib_node_runtime_sync_stats_node (vlib_node_t *n, vlib_node_runtime_t *r,
                                   uword n_calls, uword n_vectors,
                                   uword n_clocks)
{
  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

void
vlib_node_runtime_sync_stats (vlib_main_t *vm, vlib_node_runtime_t *r,
                              uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);
  vlib_node_runtime_sync_stats_node (n, r, n_calls, n_vectors, n_clocks);
}

always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
                         vlib_process_t * p,
                         uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

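/* Fold a node's runtime counters, including per-next-node vector
   counts, into the 64-bit totals kept in the vlib_node_t.  Process
   node stats are only synced on the main thread. */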
void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != vlib_get_first_main ())
        return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt =
      vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
                        n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure. */
  {
    vlib_next_frame_t *nf;
    uword i;
    for (i = 0; i < rt->n_next_nodes; i++)
      {
        nf = vlib_node_runtime_get_next_frame (vm, rt, i);
        vec_elt (n->n_vectors_by_next_node, i) +=
          nf->vectors_since_last_overflow;
        nf->vectors_since_last_overflow = 0;
      }
  }
}

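/* Fold one dispatch's call/vector/clock counts into the runtime's
   32-bit counters, spilling to the 64-bit node totals if they would
   overflow.  The return value comes from
   vlib_node_runtime_update_main_loop_vector_stats () and is used by
   dispatch_node () as the vector-length signal for adaptive mode. */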
always_inline u32
vlib_node_runtime_update_stats (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                uword n_calls,
                                uword n_vectors, uword n_clocks)
{
  u32 ca0, ca1, v0, v1, cl0, cl1, r;

  cl0 = cl1 = node->clocks_since_last_overflow;
  ca0 = ca1 = node->calls_since_last_overflow;
  v0 = v1 = node->vectors_since_last_overflow;

  ca1 = ca0 + n_calls;
  v1 = v0 + n_vectors;
  cl1 = cl0 + n_clocks;

  node->calls_since_last_overflow = ca1;
  node->clocks_since_last_overflow = cl1;
  node->vectors_since_last_overflow = v1;

  node->max_clock_n = node->max_clock > n_clocks ?
    node->max_clock_n : n_vectors;
  node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks;

  r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);

  if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
    {
      node->calls_since_last_overflow = ca0;
      node->clocks_since_last_overflow = cl0;
      node->vectors_since_last_overflow = v0;

      vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
    }

  return r;
}

always_inline void
vlib_process_update_stats (vlib_main_t * vm,
                           vlib_process_t * p,
                           uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_update_stats (vm, &p->node_runtime,
                                  n_calls, n_vectors, n_clocks);
}

static clib_error_t *
vlib_cli_elog_clear (vlib_main_t * vm,
                     unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_reset_buffer (&vlib_global_main.elog_main);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_clear_cli, static) = {
  .path = "event-logger clear",
  .short_help = "Clear the event log",
  .function = vlib_cli_elog_clear,
};
/* *INDENT-ON* */

#ifdef CLIB_UNIX
static clib_error_t *
elog_save_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  char *file, *chroot_file;
  clib_error_t *error = 0;

  if (!unformat (input, "%s", &file))
    {
      vlib_cli_output (vm, "expected file name, got `%U'",
                       format_unformat_error, input);
      return 0;
    }

  /* It's fairly hard to get "../oopsie" through unformat; just in case */
  if (strstr (file, "..") || index (file, '/'))
    {
      vlib_cli_output (vm, "illegal characters in filename '%s'", file);
      return 0;
    }

  chroot_file = (char *) format (0, "/tmp/%s%c", file, 0);

  vec_free (file);

  vlib_cli_output (vm, "Saving %wd of %wd events to %s",
                   elog_n_events_in_buffer (em),
                   elog_buffer_capacity (em), chroot_file);

  vlib_worker_thread_barrier_sync (vm);
  error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
  vlib_worker_thread_barrier_release (vm);
  vec_free (chroot_file);
  return error;
}

void
vlib_post_mortem_dump (void)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();

  for (int i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
    (vgm->post_mortem_callbacks[i]) ();
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
  .path = "event-logger save",
  .short_help = "event-logger save <filename> (saves log in /tmp/<filename>)",
  .function = elog_save_buffer,
};
/* *INDENT-ON* */

static clib_error_t *
elog_stop (vlib_main_t * vm,
           unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = em->n_total_events;

  vlib_cli_output (vm, "Stopped the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_stop_cli, static) = {
  .path = "event-logger stop",
  .short_help = "Stop the event-logger",
  .function = elog_stop,
};
/* *INDENT-ON* */

static clib_error_t *
elog_restart (vlib_main_t * vm,
              unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;

  em->n_total_events_disable_limit = ~0;

  vlib_cli_output (vm, "Restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_restart_cli, static) = {
  .path = "event-logger restart",
  .short_help = "Restart the event-logger",
  .function = elog_restart,
};
/* *INDENT-ON* */

static clib_error_t *
elog_resize_command_fn (vlib_main_t * vm,
                        unformat_input_t * input, vlib_cli_command_t * cmd)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  u32 tmp;

  /* Stop the parade */
  elog_reset_buffer (em);

  if (unformat (input, "%d", &tmp))
    {
      elog_alloc (em, tmp);
      em->n_total_events_disable_limit = ~0;
    }
  else
    return clib_error_return (0, "Must specify how many events in the ring");

  vlib_cli_output (vm, "Resized ring and restarted the event logger...");
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
  .path = "event-logger resize",
  .short_help = "event-logger resize <nnn>",
  .function = elog_resize_command_fn,
};
/* *INDENT-ON* */

#endif /* CLIB_UNIX */

static void
elog_show_buffer_internal (vlib_main_t * vm, u32 n_events_to_show)
{
  elog_main_t *em = &vlib_global_main.elog_main;
  elog_event_t *e, *es;
  f64 dt;

  /* Show events in VLIB time since log clock starts after VLIB clock. */
  dt = (em->init_time.cpu - vm->clib_time.init_cpu_time)
    * vm->clib_time.seconds_per_clock;

  es = elog_peek_events (em);
  vlib_cli_output (vm, "%d of %d events in buffer, logger %s", vec_len (es),
                   em->event_ring_size,
                   em->n_total_events < em->n_total_events_disable_limit ?
                   "running" : "stopped");
  vec_foreach (e, es)
    {
      vlib_cli_output (vm, "%18.9f: %U",
                       e->time + dt, format_elog_event, em, e);
      n_events_to_show--;
      if (n_events_to_show == 0)
        break;
    }
  vec_free (es);
}

static clib_error_t *
elog_show_buffer (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 n_events_to_show;
  clib_error_t *error = 0;

  n_events_to_show = 250;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "%d", &n_events_to_show))
        ;
      else if (unformat (input, "all"))
        n_events_to_show = ~0;
      else
        return unformat_parse_error (input);
    }
  elog_show_buffer_internal (vm, n_events_to_show);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_show_cli, static) = {
  .path = "show event-logger",
  .short_help = "Show event logger info",
  .function = elog_show_buffer,
};
/* *INDENT-ON* */

void
vlib_gdb_show_event_log (void)
{
  elog_show_buffer_internal (vlib_get_main (), (u32) ~ 0);
}

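/* Emit a node call/return event to the event logger when graph
   dispatch tracing or single-node circuit tracing is enabled. */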
static inline void
vlib_elog_main_loop_event (vlib_main_t * vm,
                           u32 node_index,
                           u64 time, u32 n_vectors, u32 is_return)
{
  vlib_main_t *evm = vlib_get_first_main ();
  elog_main_t *em = vlib_get_elog_main ();
  int enabled = evm->elog_trace_graph_dispatch |
    evm->elog_trace_graph_circuit;

  if (PREDICT_FALSE (enabled && n_vectors))
    {
      if (PREDICT_FALSE (!elog_is_enabled (em)))
        {
          evm->elog_trace_graph_dispatch = 0;
          evm->elog_trace_graph_circuit = 0;
          return;
        }
      if (PREDICT_TRUE
          (evm->elog_trace_graph_dispatch ||
           (evm->elog_trace_graph_circuit &&
            node_index == evm->elog_trace_graph_circuit_node_index)))
        {
          elog_track (em,
                      /* event type */
                      vec_elt_at_index (is_return
                                        ? evm->node_return_elog_event_types
                                        : evm->node_call_elog_event_types,
                                        node_index),
                      /* track */
                      (vm->thread_index ?
                       &vlib_worker_threads[vm->thread_index].elog_track
                       : &em->default_track),
                      /* data to log */ n_vectors);
        }
    }
}

static inline void
add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
{
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
  if (PREDICT_FALSE (b->trajectory_nb >= VLIB_BUFFER_TRACE_TRAJECTORY_MAX))
    return;
  b->trajectory_trace[b->trajectory_nb] = node_index;
  b->trajectory_nb++;
#endif
}

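/* Dispatch a single node on a single frame: log call/return events,
   update perf counters and statistics, invoke the node function (or
   the dispatch wrapper, if one is installed), and for adaptive-mode
   input nodes switch between polling and interrupt state based on the
   observed vector length.  Returns the timestamp taken after the node
   ran. */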
static_always_inline u64
dispatch_node (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_node_type_t type,
               vlib_node_state_t dispatch_state,
               vlib_frame_t * frame, u64 last_time_stamp)
{
  uword n, v;
  u64 t;
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;

  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *n = vlib_get_node (vm, node->node_index);
      ASSERT (n->type == type);
    }

  /* Only non-internal nodes may be disabled. */
  if (type != VLIB_NODE_TYPE_INTERNAL && node->state != dispatch_state)
    {
      ASSERT (type != VLIB_NODE_TYPE_INTERNAL);
      return last_time_stamp;
    }

  if ((type == VLIB_NODE_TYPE_PRE_INPUT || type == VLIB_NODE_TYPE_INPUT)
      && dispatch_state != VLIB_NODE_STATE_INTERRUPT)
    {
      u32 c = node->input_main_loops_per_call;
      /* Only call node when count reaches zero. */
      if (c)
        {
          node->input_main_loops_per_call = c - 1;
          return last_time_stamp;
        }
    }

  /* Speculatively prefetch next frames. */
  if (node->n_next_nodes > 0)
    {
      nf = vec_elt_at_index (nm->next_frames, node->next_frame_index);
      CLIB_PREFETCH (nf, 4 * sizeof (nf[0]), WRITE);
    }

  vm->cpu_time_last_node_dispatch = last_time_stamp;

  vlib_elog_main_loop_event (vm, node->node_index,
                             last_time_stamp, frame ? frame->n_vectors : 0,
                             /* is_after */ 0);

  vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
                                  VLIB_NODE_RUNTIME_PERF_BEFORE);

  /*
   * Turn this on if you run into
   * "bad monkey" contexts, and you want to know exactly
   * which nodes they've visited... See ixge.c...
   */
  if (VLIB_BUFFER_TRACE_TRAJECTORY && frame)
    {
      int i;
      u32 *from;
      from = vlib_frame_vector_args (frame);
      for (i = 0; i < frame->n_vectors; i++)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
          add_trajectory_trace (b, node->node_index);
        }
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }
  else
    {
      if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
        n = node->function (vm, node, frame);
      else
        n = vm->dispatch_wrapper_fn (vm, node, frame);
    }

  t = clib_cpu_time_now ();

  vlib_node_runtime_perf_counter (vm, node, frame, n, t,
                                  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_elog_main_loop_event (vm, node->node_index, t, n, 1 /* is_after */ );

  vm->main_loop_vectors_processed += n;
  vm->main_loop_nodes_processed += n > 0;

  v = vlib_node_runtime_update_stats (vm, node,
                                      /* n_calls */ 1,
                                      /* n_vectors */ n,
                                      /* n_clocks */ t - last_time_stamp);

  /* When in adaptive mode and vector rate crosses threshold switch to
     polling mode and vice versa. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE))
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (e) =
        {
          .function = (char *) __FUNCTION__,
          .format = "%s vector length %d, switching to %s",
          .format_args = "T4i4t4",
          .n_enum_strings = 2,
          .enum_strings = {
            "interrupt", "polling",
          },
        };
      /* *INDENT-ON* */
      struct
      {
        u32 node_name, vector_length, is_polling;
      } *ed;

      if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
           && v >= nm->polling_threshold_vector_length) &&
          !(node->flags &
            VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          n->state = VLIB_NODE_STATE_POLLING;
          node->state = VLIB_NODE_STATE_POLLING;
          node->flags &=
            ~VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
          node->flags |= VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] -= 1;
          nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] += 1;

          if (PREDICT_FALSE (
                vlib_get_first_main ()->elog_trace_graph_dispatch))
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;

              ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                    w->elog_track);
              ed->node_name = n->name_elog_string;
              ed->vector_length = v;
              ed->is_polling = 1;
            }
        }
      else if (dispatch_state == VLIB_NODE_STATE_POLLING
               && v <= nm->interrupt_threshold_vector_length)
        {
          vlib_node_t *n = vlib_get_node (vm, node->node_index);
          if (node->flags &
              VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
            {
              /* Switch to interrupt mode after dispatch in polling one more time.
                 This allows driver to re-enable interrupts. */
              n->state = VLIB_NODE_STATE_INTERRUPT;
              node->state = VLIB_NODE_STATE_INTERRUPT;
              node->flags &=
                ~VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] -= 1;
              nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] += 1;

            }
          else
            {
              vlib_worker_thread_t *w = vlib_worker_threads
                + vm->thread_index;
              node->flags |=
                VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
              if (PREDICT_FALSE (
                    vlib_get_first_main ()->elog_trace_graph_dispatch))
                {
                  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                        w->elog_track);
                  ed->node_name = n->name_elog_string;
                  ed->vector_length = v;
                  ed->is_polling = 0;
                }
            }
        }
    }

  return t;
}

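/* Dispatch one entry of nm->pending_frames: temporarily detach the
   frame from its next-frame slot (so new enqueues get a fresh frame
   while this one is in flight), run the internal node on it, then
   either restore the frame for reuse or free it, honoring
   VLIB_FRAME_FREE_AFTER_DISPATCH. */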
static u64
dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
                       u64 last_time_stamp)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_t *f;
  vlib_next_frame_t *nf, nf_placeholder;
  vlib_node_runtime_t *n;
  vlib_frame_t *restore_frame;
  vlib_pending_frame_t *p;

  /* See comment below about dangling references to nm->pending_frames */
  p = nm->pending_frames + pending_frame_index;

  n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                        p->node_runtime_index);

  f = vlib_get_frame (vm, p->frame);
  if (p->next_frame_index == VLIB_PENDING_FRAME_NO_NEXT_FRAME)
    {
      /* No next frame: so use placeholder on stack. */
      nf = &nf_placeholder;
      nf->flags = f->frame_flags & VLIB_NODE_FLAG_TRACE;
      nf->frame = NULL;
    }
  else
    nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  /* Force allocation of new frame while current frame is being
     dispatched. */
  restore_frame = NULL;
  if (nf->frame == p->frame)
    {
      nf->frame = NULL;
      nf->flags &= ~VLIB_FRAME_IS_ALLOCATED;
      if (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH))
        restore_frame = p->frame;
    }

  /* Frame must be pending. */
  ASSERT (f->frame_flags & VLIB_FRAME_PENDING);
  ASSERT (f->n_vectors > 0);

  /* Copy trace flag from next frame to node.
     Trace flag indicates that at least one vector in the dispatched
     frame is traced. */
  n->flags &= ~VLIB_NODE_FLAG_TRACE;
  n->flags |= (nf->flags & VLIB_FRAME_TRACE) ? VLIB_NODE_FLAG_TRACE : 0;
  nf->flags &= ~VLIB_FRAME_TRACE;

  last_time_stamp = dispatch_node (vm, n,
                                   VLIB_NODE_TYPE_INTERNAL,
                                   VLIB_NODE_STATE_POLLING,
                                   f, last_time_stamp);
  /* Internal node vector-rate accounting, for summary stats */
  vm->internal_node_vectors += f->n_vectors;
  vm->internal_node_calls++;
  vm->internal_node_last_vectors_per_main_loop =
    (f->n_vectors > vm->internal_node_last_vectors_per_main_loop) ?
    f->n_vectors : vm->internal_node_last_vectors_per_main_loop;

  f->frame_flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_NO_APPEND);

  /* Frame is ready to be used again, so restore it. */
  if (restore_frame != NULL)
    {
      /*
       * We mustn't restore a frame that is flagged to be freed. This
       * shouldn't happen since frames to be freed post dispatch are
       * those used when the to-node frame becomes full i.e. they form a
       * sort of queue of frames to a single node. If we get here then
       * the to-node frame and the pending frame *were* the same, and so
       * we removed the to-node frame. Therefore this frame is no
       * longer part of the queue for that node and hence it cannot be
       * its overspill.
       */
      ASSERT (!(f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH));

      /*
       * NB: dispatching node n can result in the creation and scheduling
       * of new frames, and hence in the reallocation of nm->pending_frames.
       * Recompute p, or no supper. This was broken for more than 10 years.
       */
      p = nm->pending_frames + pending_frame_index;

      /*
       * p->next_frame_index can change during node dispatch if node
       * function decides to change graph hook up.
       */
      nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;

      if (NULL == nf->frame)
        {
          /* no new frame has been assigned to this node, use the saved one */
          nf->frame = restore_frame;
          f->n_vectors = 0;
          f->flags = 0;
        }
      else
        {
          /* The node has gained a frame, implying packets from the current
             frame were re-queued to this same node.  We don't need the
             saved one anymore. */
          vlib_frame_free (vm, n, f);
        }
    }
  else
    {
      if (f->frame_flags & VLIB_FRAME_FREE_AFTER_DISPATCH)
        {
          ASSERT (!(n->flags & VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH));
          vlib_frame_free (vm, n, f);
        }
    }

  return last_time_stamp;
}

always_inline uword
vlib_process_stack_is_valid (vlib_process_t * p)
{
  return p->stack[0] == VLIB_PROCESS_STACK_MAGIC;
}

typedef struct
{
  vlib_main_t *vm;
  vlib_process_t *process;
  vlib_frame_t *frame;
} vlib_process_bootstrap_args_t;

/* Called in process stack. */
static uword
vlib_process_bootstrap (uword _a)
{
  vlib_process_bootstrap_args_t *a;
  vlib_main_t *vm;
  vlib_node_runtime_t *node;
  vlib_frame_t *f;
  vlib_process_t *p;
  uword n;

  a = uword_to_pointer (_a, vlib_process_bootstrap_args_t *);

  vm = a->vm;
  p = a->process;
  vlib_process_finish_switch_stack (vm);

  f = a->frame;
  node = &p->node_runtime;

  n = node->function (vm, node, f);

  ASSERT (vlib_process_stack_is_valid (p));

  vlib_process_start_switch_stack (vm, 0);
  clib_longjmp (&p->return_longjmp, n);

  return n;
}

/* Called in main stack. */
static_always_inline uword
vlib_process_startup (vlib_main_t * vm, vlib_process_t * p, vlib_frame_t * f)
{
  vlib_process_bootstrap_args_t a;
  uword r;

  a.vm = vm;
  a.process = p;
  a.frame = f;

  r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
  if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
    {
      vlib_process_start_switch_stack (vm, p);
      r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
                        (void *) p->stack + (1 << p->log2_n_stack_bytes));
    }
  else
    vlib_process_finish_switch_stack (vm);

  return r;
}

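/* Resume a suspended process: longjmp back onto the process stack and
   return whatever value the process reports when it next suspends or
   returns. */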
static_always_inline uword
vlib_process_resume (vlib_main_t * vm, vlib_process_t * p)
{
  uword r;
  p->flags &= ~(VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
		| VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT
		| VLIB_PROCESS_RESUME_PENDING);
  r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
  if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
    {
      vlib_process_start_switch_stack (vm, p);
      clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
    }
  else
    vlib_process_finish_switch_stack (vm);
  return r;
}

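/* Run a process node from the main loop: start it on its own stack and log
   elog/perf events around the call.  If the process suspends, park its frame
   in nm->suspended_process_frames and, for clock-based suspends, start a
   timer on the process timing wheel.  Returns the updated time stamp. */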
static u64
dispatch_process (vlib_main_t * vm,
		  vlib_process_t * p, vlib_frame_t * f, u64 last_time_stamp)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_runtime_t *node_runtime = &p->node_runtime;
  vlib_node_t *node = vlib_get_node (vm, node_runtime->node_index);
  u32 old_process_index;
  u64 t;
  uword n_vectors, is_suspend;

  if (node->state != VLIB_NODE_STATE_POLLING
      || (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
		      | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT)))
    return last_time_stamp;

  p->flags |= VLIB_PROCESS_IS_RUNNING;

  t = last_time_stamp;
  vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
			     f ? f->n_vectors : 0, /* is_after */ 0);

  /* Save away current process for suspend. */
  old_process_index = nm->current_process_index;
  nm->current_process_index = node->runtime_index;

  vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
				  VLIB_NODE_RUNTIME_PERF_BEFORE);

  n_vectors = vlib_process_startup (vm, p, f);

  nm->current_process_index = old_process_index;

  ASSERT (n_vectors != VLIB_PROCESS_RETURN_LONGJMP_RETURN);
  is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
  if (is_suspend)
    {
      vlib_pending_frame_t *pf;

      n_vectors = 0;
      pool_get (nm->suspended_process_frames, pf);
      pf->node_runtime_index = node->runtime_index;
      pf->frame = f;
      pf->next_frame_index = ~0;

      p->n_suspends += 1;
      p->suspended_process_frame_index = pf - nm->suspended_process_frames;

      if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
	{
	  TWT (tw_timer_wheel) * tw =
	    (TWT (tw_timer_wheel) *) nm->timing_wheel;
	  p->stop_timer_handle =
	    TW (tw_timer_start) (tw,
				 vlib_timing_wheel_data_set_suspended_process
				 (node->runtime_index) /* [sic] pool index */ ,
				 0 /* timer_id */ ,
				 p->resume_clock_interval);
	}
    }
  else
    p->flags &= ~VLIB_PROCESS_IS_RUNNING;

  t = clib_cpu_time_now ();

  vlib_elog_main_loop_event (vm, node_runtime->node_index, t, is_suspend,
			     /* is_after */ 1);

  vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
				  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_process_update_stats (vm, p,
			     /* n_calls */ !is_suspend,
			     /* n_vectors */ n_vectors,
			     /* n_clocks */ t - last_time_stamp);

  return t;
}

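/* Give a process node its initial run; called once per process at startup. */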
void
vlib_start_process (vlib_main_t * vm, uword process_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_process_t *p = vec_elt (nm->processes, process_index);
  dispatch_process (vm, p, /* frame */ 0, /* cpu_time_now */ 0);
}

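/* Resume a process that previously suspended (timer expiry or event).
   If it suspends again, restart its timer when it is waiting for the clock;
   otherwise return its suspended frame to the pool.  Returns the updated
   time stamp. */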
static u64
dispatch_suspended_process (vlib_main_t * vm,
			    uword process_index, u64 last_time_stamp)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_runtime_t *node_runtime;
  vlib_node_t *node;
  vlib_frame_t *f;
  vlib_process_t *p;
  vlib_pending_frame_t *pf;
  u64 t, n_vectors, is_suspend;

  t = last_time_stamp;

  p = vec_elt (nm->processes, process_index);
  if (PREDICT_FALSE (!(p->flags & VLIB_PROCESS_IS_RUNNING)))
    return last_time_stamp;

  ASSERT (p->flags & (VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
		      | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT));

  pf = pool_elt_at_index (nm->suspended_process_frames,
			  p->suspended_process_frame_index);

  node_runtime = &p->node_runtime;
  node = vlib_get_node (vm, node_runtime->node_index);
  f = pf->frame;

  vlib_elog_main_loop_event (vm, node_runtime->node_index, t,
			     f ? f->n_vectors : 0, /* is_after */ 0);

  /* Save away current process for suspend. */
  nm->current_process_index = node->runtime_index;

  vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
				  VLIB_NODE_RUNTIME_PERF_BEFORE);

  n_vectors = vlib_process_resume (vm, p);
  t = clib_cpu_time_now ();

  nm->current_process_index = ~0;

  is_suspend = n_vectors == VLIB_PROCESS_RETURN_LONGJMP_SUSPEND;
  if (is_suspend)
    {
      /* Suspend it again. */
      n_vectors = 0;
      p->n_suspends += 1;
      if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
	{
	  p->stop_timer_handle =
	    TW (tw_timer_start) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
				 vlib_timing_wheel_data_set_suspended_process
				 (node->runtime_index) /* [sic] pool index */ ,
				 0 /* timer_id */ ,
				 p->resume_clock_interval);
	}
    }
  else
    {
      p->flags &= ~VLIB_PROCESS_IS_RUNNING;
      pool_put_index (nm->suspended_process_frames,
		      p->suspended_process_frame_index);
      p->suspended_process_frame_index = ~0;
    }

  t = clib_cpu_time_now ();
  vlib_elog_main_loop_event (vm, node_runtime->node_index, t, !is_suspend,
			     /* is_after */ 1);

  vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
				  VLIB_NODE_RUNTIME_PERF_AFTER);

  vlib_process_update_stats (vm, p,
			     /* n_calls */ !is_suspend,
			     /* n_vectors */ n_vectors,
			     /* n_clocks */ t - last_time_stamp);

  return t;
}

void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
void
vl_api_send_pending_rpc_requests (vlib_main_t * vm)
{
}

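/* The shared main/worker dispatch loop.  The main thread starts processes,
   advances the process timing wheel and services queue signals; worker
   threads additionally check the barrier and forward pending RPC requests.
   Every iteration dispatches pre-input, polling input and interrupt-driven
   input nodes, drains the pending-frame vector, and updates the
   loops-per-second estimate. */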
static_always_inline void
vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  uword i;
  u64 cpu_time_now;
  f64 now;
  vlib_frame_queue_main_t *fqm;
  u32 frame_queue_check_counter = 0;

  /* Initialize pending node vector. */
  if (is_main)
    {
      vec_resize (nm->pending_frames, 32);
      vec_set_len (nm->pending_frames, 0);
    }

  /* Mark time of main loop start. */
  if (is_main)
    {
      cpu_time_now = vm->clib_time.last_cpu_time;
      vm->cpu_time_main_loop_start = cpu_time_now;
    }
  else
    cpu_time_now = clib_cpu_time_now ();

  /* Pre-allocate interrupt runtime indices and lock. */
  vec_alloc_aligned (nm->pending_interrupts, 1, CLIB_CACHE_LINE_BYTES);

  /* Pre-allocate expired nodes. */
  if (!nm->polling_threshold_vector_length)
    nm->polling_threshold_vector_length = 10;
  if (!nm->interrupt_threshold_vector_length)
    nm->interrupt_threshold_vector_length = 5;

  vm->cpu_id = clib_get_current_cpu_id ();
  vm->numa_node = clib_get_current_numa_node ();
  os_set_numa_index (vm->numa_node);

  /* Start all processes. */
  if (is_main)
    {
      uword i;

      /*
       * Perform an initial barrier sync. Pays no attention to
       * the barrier sync hold-down timer scheme, which won't work
       * at this point in time.
       */
      vlib_worker_thread_initial_barrier_sync_and_release (vm);

      nm->current_process_index = ~0;
      for (i = 0; i < vec_len (nm->processes); i++)
	cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
					 cpu_time_now);
    }

  while (1)
    {
      vlib_node_runtime_t *n;

      if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
	{
	  if (!is_main)
	    vl_api_send_pending_rpc_requests (vm);
	}

      if (!is_main)
	vlib_worker_thread_barrier_check ();

      if (PREDICT_FALSE (vm->check_frame_queues + frame_queue_check_counter))
	{
	  u32 processed = 0;
	  vlib_frame_queue_dequeue_fn_t *fn =
	    vlib_buffer_func_main.frame_queue_dequeue_fn;

	  if (vm->check_frame_queues)
	    {
	      frame_queue_check_counter = 100;
	      vm->check_frame_queues = 0;
	    }

	  vec_foreach (fqm, tm->frame_queue_mains)
	    processed += (fn) (vm, fqm);

	  /* No handoff queue work found? */
	  if (processed)
	    frame_queue_check_counter = 100;
	  else
	    frame_queue_check_counter--;
	}

      if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
	clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm,
			     cpu_time_now);

      /* Process pre-input nodes. */
      cpu_time_now = clib_cpu_time_now ();
      vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
	cpu_time_now = dispatch_node (vm, n,
				      VLIB_NODE_TYPE_PRE_INPUT,
				      VLIB_NODE_STATE_POLLING,
				      /* frame */ 0,
				      cpu_time_now);

      /* Next process input nodes. */
      vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
	cpu_time_now = dispatch_node (vm, n,
				      VLIB_NODE_TYPE_INPUT,
				      VLIB_NODE_STATE_POLLING,
				      /* frame */ 0,
				      cpu_time_now);

      if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
	vm->queue_signal_callback (vm);

      if (__atomic_load_n (nm->pending_interrupts, __ATOMIC_ACQUIRE))
	{
	  int int_num = -1;
	  *nm->pending_interrupts = 0;

	  while ((int_num =
		    clib_interrupt_get_next (nm->interrupts, int_num)) != -1)
	    {
	      vlib_node_runtime_t *n;
	      clib_interrupt_clear (nm->interrupts, int_num);
	      n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
				    int_num);
	      cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
					    VLIB_NODE_STATE_INTERRUPT,
					    /* frame */ 0, cpu_time_now);
	    }
	}

      /* Input nodes may have added work to the pending vector.
	 Process pending vector until there is nothing left.
	 All pending vectors will be processed from input -> output. */
      for (i = 0; i < _vec_len (nm->pending_frames); i++)
	cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
      /* Reset pending vector for next iteration. */
      vec_set_len (nm->pending_frames, 0);

      if (is_main)
	{
	  /* *INDENT-OFF* */
	  ELOG_TYPE_DECLARE (es) =
	    {
	      .format = "process tw start",
	      .format_args = "",
	    };
	  ELOG_TYPE_DECLARE (ee) =
	    {
	      .format = "process tw end: %d",
	      .format_args = "i4",
	    };
	  /* *INDENT-ON* */

	  struct
	  {
	    int nready_procs;
	  } *ed;

	  /* Check if process nodes have expired from timing wheel. */
	  ASSERT (nm->data_from_advancing_timing_wheel != 0);

	  if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
	    ed = ELOG_DATA (&vlib_global_main.elog_main, es);

	  nm->data_from_advancing_timing_wheel =
	    TW (tw_timer_expire_timers_vec)
	    ((TWT (tw_timer_wheel) *) nm->timing_wheel, vlib_time_now (vm),
	     nm->data_from_advancing_timing_wheel);

	  ASSERT (nm->data_from_advancing_timing_wheel != 0);

	  if (PREDICT_FALSE (vm->elog_trace_graph_dispatch))
	    {
	      ed = ELOG_DATA (&vlib_global_main.elog_main, ee);
	      ed->nready_procs =
		_vec_len (nm->data_from_advancing_timing_wheel);
	    }

	  if (PREDICT_FALSE
	      (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
	    {
	      uword i;

	      for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
		   i++)
		{
		  u32 d = nm->data_from_advancing_timing_wheel[i];
		  u32 di = vlib_timing_wheel_data_get_index (d);

		  if (vlib_timing_wheel_data_is_timed_event (d))
		    {
		      vlib_signal_timed_event_data_t *te =
			pool_elt_at_index (nm->signal_timed_event_data_pool,
					   di);
		      vlib_node_t *n =
			vlib_get_node (vm, te->process_node_index);
		      vlib_process_t *p =
			vec_elt (nm->processes, n->runtime_index);
		      void *data;
		      data =
			vlib_process_signal_event_helper (nm, n, p,
							  te->event_type_index,
							  te->n_data_elts,
							  te->n_data_elt_bytes);
		      if (te->n_data_bytes < sizeof (te->inline_event_data))
			clib_memcpy_fast (data, te->inline_event_data,
					  te->n_data_bytes);
		      else
			{
			  clib_memcpy_fast (data, te->event_data_as_vector,
					    te->n_data_bytes);
			  vec_free (te->event_data_as_vector);
			}
		      pool_put (nm->signal_timed_event_data_pool, te);
		    }
		  else
		    {
		      cpu_time_now = clib_cpu_time_now ();
		      cpu_time_now =
			dispatch_suspended_process (vm, di, cpu_time_now);
		    }
		}
	      vec_set_len (nm->data_from_advancing_timing_wheel, 0);
	    }
	}
      vlib_increment_main_loop_counter (vm);
      /* Record time stamp in case there are no enabled nodes and above
	 calls do not update time stamp. */
      cpu_time_now = clib_cpu_time_now ();
      vm->loops_this_reporting_interval++;
      now = clib_time_now_internal (&vm->clib_time, cpu_time_now);
      /* Time to update loops_per_second? */
      if (PREDICT_FALSE (now >= vm->loop_interval_end))
	{
	  /* Next sample ends in 20ms */
	  if (vm->loop_interval_start)
	    {
	      f64 this_loops_per_second;

	      this_loops_per_second =
		((f64) vm->loops_this_reporting_interval) /
		(now - vm->loop_interval_start);

	      vm->loops_per_second =
		vm->loops_per_second * vm->damping_constant +
		(1.0 - vm->damping_constant) * this_loops_per_second;
	      if (vm->loops_per_second != 0.0)
		vm->seconds_per_loop = 1.0 / vm->loops_per_second;
	      else
		vm->seconds_per_loop = 0.0;
	    }
	  /* New interval starts now, and ends in 20ms */
	  vm->loop_interval_start = now;
	  vm->loop_interval_end = now + 2e-4;
	  vm->loops_this_reporting_interval = 0;
	}
    }
}

static void
vlib_main_loop (vlib_main_t * vm)
{
  vlib_main_or_worker_loop (vm, /* is_main */ 1);
}

void
vlib_worker_loop (vlib_main_t * vm)
{
  vlib_main_or_worker_loop (vm, /* is_main */ 0);
}

vlib_global_main_t vlib_global_main;

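/* Register or unregister a callback to be invoked on the post-mortem (crash)
   path; duplicate registrations are ignored. */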
void
vlib_add_del_post_mortem_callback (void *cb, int is_add)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  int i;

  if (is_add == 0)
    {
      for (i = vec_len (vgm->post_mortem_callbacks) - 1; i >= 0; i--)
	if (vgm->post_mortem_callbacks[i] == cb)
	  vec_del1 (vgm->post_mortem_callbacks, i);
      return;
    }

  for (i = 0; i < vec_len (vgm->post_mortem_callbacks); i++)
    if (vgm->post_mortem_callbacks[i] == cb)
      return;
  vec_add1 (vgm->post_mortem_callbacks, cb);
}

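/* Post-mortem callback: flush the event-log ring to a file under /tmp so it
   can be inspected after a crash. */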
static void
elog_post_mortem_dump (void)
{
  elog_main_t *em = vlib_get_elog_main ();

  u8 *filename;
  clib_error_t *error;

  filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
  error = elog_write_file (em, (char *) filename, 1 /* flush ring */);
  if (error)
    clib_error_report (error);
  /*
   * We're in the middle of crashing. Don't try to free the filename.
   */
}

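/* Early "vlib { ... }" configuration: memory tracing, event-log ring size,
   post-mortem elog dump and buffer-allocator fault-injection knobs. */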
static clib_error_t *
vlib_main_configure (vlib_main_t * vm, unformat_input_t * input)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  int turn_on_mem_trace = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "memory-trace"))
	turn_on_mem_trace = 1;

      else if (unformat (input, "elog-events %d",
			 &vgm->configured_elog_ring_size))
	vgm->configured_elog_ring_size =
	  1 << max_log2 (vgm->configured_elog_ring_size);
      else if (unformat (input, "elog-post-mortem-dump"))
	vlib_add_del_post_mortem_callback (elog_post_mortem_dump,
					   /* is_add */ 1);
      else if (unformat (input, "buffer-alloc-success-rate %f",
			 &vm->buffer_alloc_success_rate))
	{
	  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
	    return clib_error_return
	      (0, "Buffer fault injection not configured");
	}
      else if (unformat (input, "buffer-alloc-success-seed %u",
			 &vm->buffer_alloc_success_seed))
	{
	  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
	    return clib_error_return
	      (0, "Buffer fault injection not configured");
	}
      else
	return unformat_parse_error (input);
    }

  unformat_free (input);

  /* Enable memory trace as early as possible. */
  if (turn_on_mem_trace)
    clib_mem_trace (1);

  return 0;
}

VLIB_EARLY_CONFIG_FUNCTION (vlib_main_configure, "vlib");

static void
placeholder_queue_signal_callback (vlib_main_t * vm)
{
}

#define foreach_weak_reference_stub \
_(vpe_api_init) \
_(vlibmemory_init) \
_(map_api_segment_init)

#define _(name) \
clib_error_t *name (vlib_main_t *vm) __attribute__((weak)); \
clib_error_t *name (vlib_main_t *vm) { return 0; }
foreach_weak_reference_stub;
#undef _

void vl_api_set_elog_main (elog_main_t * m) __attribute__ ((weak));
void
vl_api_set_elog_main (elog_main_t * m)
{
  clib_warning ("STUB");
}

int vl_api_set_elog_trace_api_messages (int enable) __attribute__ ((weak));
int
vl_api_set_elog_trace_api_messages (int enable)
{
  clib_warning ("STUB");
  return 0;
}

int vl_api_get_elog_trace_api_messages (void) __attribute__ ((weak));
int
vl_api_get_elog_trace_api_messages (void)
{
  clib_warning ("STUB");
  return 0;
}

/* Main function. */
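/* Top-level entry: parses the "vlib" input, initializes physmem, logging,
   stats, buffers, threads and the node graph, runs init and config
   functions, creates the process timing wheel, then enters the dispatch
   loop.  Returns the main loop's exit status. */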
int
vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  clib_error_t *volatile error;
  vlib_node_main_t *nm = &vm->node_main;

  vm->queue_signal_callback = placeholder_queue_signal_callback;

  /* Reconfigure event log which is enabled very early */
  if (vgm->configured_elog_ring_size &&
      vgm->configured_elog_ring_size != vgm->elog_main.event_ring_size)
    elog_resize (&vgm->elog_main, vgm->configured_elog_ring_size);
  vl_api_set_elog_main (vlib_get_elog_main ());
  (void) vl_api_set_elog_trace_api_messages (1);

  /* Default name. */
  if (!vgm->name)
    vgm->name = "VLIB";

  if ((error = vlib_physmem_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = vlib_log_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = vlib_stats_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = vlib_buffer_main_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = vlib_thread_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  /* Register node function march variants */
  vlib_register_all_node_march_variants (vm);

  /* Register static nodes so that init functions may use them. */
  vlib_register_all_static_nodes (vm);

  /* Set seed for random number generator.
     Allow user to specify seed to make random sequence deterministic. */
  if (!unformat (input, "seed %wd", &vm->random_seed))
    vm->random_seed = clib_cpu_time_now ();
  clib_random_buffer_init (&vm->random_buffer, vm->random_seed);

  /* Initialize node graph. */
  if ((error = vlib_node_main_init (vm)))
    {
      /* Arrange for graph hook up error to not be fatal when debugging. */
      if (CLIB_DEBUG > 0)
	clib_error_report (error);
      else
	goto done;
    }

  /* Direct call / weak reference, for vlib standalone use-cases */
  if ((error = vpe_api_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = vlibmemory_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  if ((error = map_api_segment_init (vm)))
    {
      clib_error_report (error);
      goto done;
    }

  /* See unix/main.c; most likely already set up */
  if (vgm->init_functions_called == 0)
    vgm->init_functions_called = hash_create (0, /* value bytes */ 0);
  if ((error = vlib_call_all_init_functions (vm)))
    goto done;

  nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
					     CLIB_CACHE_LINE_BYTES);

  vec_validate (nm->data_from_advancing_timing_wheel, 10);
  vec_set_len (nm->data_from_advancing_timing_wheel, 0);

  /* Create the process timing wheel */
  TW (tw_timer_wheel_init) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
			    0 /* no callback */ ,
			    10e-6 /* timer period 10us */ ,
			    ~0 /* max expirations per call */ );

  vec_validate (vm->pending_rpc_requests, 0);
  vec_set_len (vm->pending_rpc_requests, 0);
  vec_validate (vm->processing_rpc_requests, 0);
  vec_set_len (vm->processing_rpc_requests, 0);

  /* Default params for the buffer allocator fault injector, if configured */
  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
    {
      vm->buffer_alloc_success_seed = 0xdeaddabe;
      vm->buffer_alloc_success_rate = 0.80;
    }

  if ((error = vlib_call_all_config_functions (vm, input, 0 /* is_early */ )))
    goto done;

  /*
   * Use exponential smoothing, with a half-life of 1 second
   * reported_rate(t) = reported_rate(t-1) * K + rate(t)*(1-K)
   *
   * Sample every 20ms, aka 50 samples per second
   * K = exp (-1.0/20.0);
   * K = 0.95
   */
  vm->damping_constant = exp (-1.0 / 20.0);

  /* Sort per-thread init functions before we start threads */
  vlib_sort_init_exit_functions (&vgm->worker_init_function_registrations);

  /* Call all main loop enter functions. */
  {
    clib_error_t *sub_error;
    sub_error = vlib_call_all_main_loop_enter_functions (vm);
    if (sub_error)
      clib_error_report (sub_error);
  }

  switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
    {
    case VLIB_MAIN_LOOP_EXIT_NONE:
      vm->main_loop_exit_set = 1;
      break;

    case VLIB_MAIN_LOOP_EXIT_CLI:
      goto done;

    default:
      error = vm->main_loop_error;
      goto done;
    }

  vlib_main_loop (vm);

done:
  vlib_worker_thread_barrier_sync (vm);
  /* Call all exit functions. */
  {
    clib_error_t *sub_error;
    sub_error = vlib_call_all_main_loop_exit_functions (vm);
    if (sub_error)
      clib_error_report (sub_error);
  }
  vlib_worker_thread_barrier_release (vm);

  if (error)
    clib_error_report (error);

  return vm->main_loop_exit_status;
}

vlib_main_t *
vlib_get_main_not_inline (void)
{
  return vlib_get_main ();
}

elog_main_t *
vlib_get_elog_main_not_inline ()
{
  return &vlib_global_main.elog_main;
}

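/* Record the desired exit status and atomically signal the main loop to
   exit. */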
void
vlib_exit_with_status (vlib_main_t *vm, int status)
{
  vm->main_loop_exit_status = status;
  __atomic_store_n (&vm->main_loop_exit_now, 1, __ATOMIC_RELEASE);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */