/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * trace.c: VLIB trace buffer.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vnet/classify/vnet_classify.h>

u8 *vnet_trace_placeholder;

/* Helper function for nodes which only trace buffer data. */
void
vlib_trace_frame_buffers_only (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       u32 * buffers,
			       uword n_buffers,
			       uword next_buffer_stride,
			       uword n_buffer_data_bytes_in_trace)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = buffers;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      u8 *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t1, b1->data + b1->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u8 *t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 1;
      n_left -= 1;
    }
}
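
/*
 * Illustrative usage sketch (not part of the original file): a node whose
 * per-packet trace is simply a copy of buffer data can call the helper
 * above from its dispatch function.  The node name, the stride of 1 and
 * the 32-byte capture size below are hypothetical.
 *
 *   static uword
 *   my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 *               vlib_frame_t * frame)
 *   {
 *     u32 *from = vlib_frame_vector_args (frame);
 *
 *     if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
 *       vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
 *                                      1, 32);
 *     ...
 *   }
 */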

/* Free up all trace buffer memory. */
void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  foreach_vlib_main ()
    {
      tm = &this_vlib_main->trace_main;

      tm->trace_enable = 0;
      vec_free (tm->nodes);
    }

  foreach_vlib_main ()
    {
      tm = &this_vlib_main->trace_main;

      for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
	if (!pool_is_free_index (tm->trace_buffer_pool, i))
	  vec_free (tm->trace_buffer_pool[i]);
      pool_free (tm->trace_buffer_pool);
    }
}

u8 *
format_vlib_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
  vlib_trace_header_t *e = vec_end (h);
  vlib_node_t *node, *prev_node;
  clib_time_t *ct = &vm->clib_time;
  f64 t;

  prev_node = 0;
  while (h < e)
    {
      node = vlib_get_node (vm, h->node_index);

      if (node != prev_node)
	{
	  t =
	    (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
	  s =
	    format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
		    node->name);
	}
      prev_node = node;

      if (node->format_trace)
	s = format (s, "\n  %U", node->format_trace, vm, node, h->data);
      else
	s = format (s, "\n  %U", node->format_buffer, h->data);

      h = vlib_trace_header_next (h);
    }

  return s;
}

/* Root of all trace cli commands. */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
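
/*
 * Typical CLI flow using the commands defined in this file (illustrative;
 * <node> stands for any graph node that supports tracing):
 *
 *   trace add <node> 50              - arm tracing for up to 50 packets per thread
 *   trace filter include <node> 10   - optionally keep only matching traces
 *   show trace [max COUNT]           - display the captured traces
 *   clear trace                      - stop tracing and free trace memory
 */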

int
trace_time_cmp (void *a1, void *a2)
{
  vlib_trace_header_t **t1 = a1;
  vlib_trace_header_t **t2 = a2;
  i64 dt = t1[0]->time - t2[0]->time;
  return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
}

/*
 * Return 1 if this packet passes the trace filter, or 0 otherwise
 */
u32
filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
{
  vlib_trace_header_t *e = vec_end (h);

  if (tm->filter_flag == 0)
    return 1;

  /*
   * When capturing a post-mortem dispatch trace,
   * toss all existing traces once per dispatch cycle.
   * So we can trace 4 billion pkts without running out of
   * memory...
   */
  if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
    return 0;

  if (tm->filter_flag == FILTER_FLAG_INCLUDE)
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 1;
	  h = vlib_trace_header_next (h);
	}
      return 0;
    }
  else				/* FILTER_FLAG_EXCLUDE */
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 0;
	  h = vlib_trace_header_next (h);
	}
      return 1;
    }

  return 0;
}

/*
 * Remove traces from the trace buffer pool that don't pass the filter
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  pool_foreach (h, tm->trace_buffer_pool)
    {
      accept = filter_accept (tm, h[0]);

      if ((n_accepted == tm->filter_count) || !accept)
	vec_add1 (traces_to_remove, h);
      else
	n_accepted++;
    }

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      vec_set_len (tm->trace_buffer_pool[trace_index], 0);
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}

static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
	;
      else
	return clib_error_create ("expected 'max COUNT', got `%U'",
				  format_unformat_error, input);
    }


  /* Get active traces from pool. */

  foreach_vlib_main ()
    {
      fmt = "------------------- Start of thread %d %s -------------------\n";
      s = format (s, fmt, index, vlib_worker_threads[index].name);

      tm = &this_vlib_main->trace_main;

      trace_apply_filter (this_vlib_main);

      traces = 0;
      pool_foreach (h, tm->trace_buffer_pool)
	{
	  vec_add1 (traces, h[0]);
	}

      if (vec_len (traces) == 0)
	{
	  s = format (s, "No packets in trace buffer\n");
	  goto done;
	}

      /* Sort them by increasing time. */
      vec_sort_with_function (traces, trace_time_cmp);

      for (i = 0; i < vec_len (traces); i++)
	{
	  if (i == max)
	    {
	      char *warn = "Limiting display to %d packets."
		" To display more specify max.";
	      vlib_cli_output (vm, warn, max);
	      s = format (s, warn, max);
	      goto done;
	    }

	  s = format (s, "Packet %d\n%U\n\n", i + 1, format_vlib_trace, vm,
		      traces[i]);
	}

    done:
      vec_free (traces);

      index++;
    }

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}

VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
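
/*
 * Rough shape of "show trace" output (illustrative), as produced by
 * cli_show_trace_buffer() and format_vlib_trace() above: one banner per
 * thread, then each packet's trace records in time order:
 *
 *   ------------------- Start of thread 0 vpp_main -------------------
 *   Packet 1
 *
 *   00:00:01:123456: <first-node-name>
 *     <output of that node's format_trace / format_buffer>
 */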

int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));

int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  return 0;
}

void
vlib_trace_stop_and_clear (void)
{
  vlib_enable_disable_pkt_trace_filter (0);	/* disable tracing */
  clear_trace_buffer ();
}


void
trace_update_capture_options (u32 add, u32 node_index, u32 filter, u8 verbose)
{
  vlib_trace_main_t *tm;
  vlib_trace_node_t *tn;

  if (add == ~0)
    add = 50;

  foreach_vlib_main ()
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;

      /*
       * Adding 0 makes no real sense, and there was no other way
       * to explicitly zero out the limits and count, so make
       * an "add 0" request really be "set to 0".
       */
      if (add == 0)
	tn->limit = tn->count = 0;
      else
	tn->limit += add;
    }

  foreach_vlib_main ()
    {
      tm = &this_vlib_main->trace_main;
      tm->trace_enable = 1;
    }

  vlib_enable_disable_pkt_trace_filter (! !filter);
}

static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
		      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_node_t *node;
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  if (vnet_trace_placeholder == 0)
    vec_validate_aligned (vnet_trace_placeholder, 2048,
			  CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
		    unformat_vlib_node, vm, &node_index, &add))
	;
      else if (unformat (line_input, "verbose"))
	verbose = 1;
      else if (unformat (line_input, "filter"))
	filter = 1;
      else
	{
	  error = clib_error_create ("expected NODE COUNT, got `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  node = vlib_get_node (vm, node_index);

  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
				 "tracing. There may be another way to "
				 "initiate trace on this node.",
				 format_vlib_node_name, vm, node_index);
      goto done;
    }

  trace_update_capture_options (add, node_index, filter, verbose);

done:
  unformat_free (line_input);

  return error;
}

VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "trace add <input-graph-node> <add'l-pkts-for-node-> [filter] [verbose]",
  .function = cli_add_trace_buffer,
};

/*
 * Configure a filter for packet traces.
 *
 * This supplements the packet trace feature so that only packets matching
 * the filter are included in the trace. Currently the only filter is to
 * keep packets that include a certain node in the trace or exclude a certain
 * node in the trace.
 *
 * The count of traced packets in the "trace add" command is still used to
 * create a certain number of traces. The "trace filter" command specifies
 * how many of those packets should be retained in the trace.
 *
 * For example, 1Mpps of traffic is arriving and one of those packets is being
 * dropped. To capture the trace for only that dropped packet, you can do:
 *     trace filter include error-drop 1
 *     trace add dpdk-input 1000000
 *     <wait one second>
 *     show trace
 *
 * Note that the filter could be implemented by capturing all traces and just
 * reducing traces displayed by the "show trace" function. But that would
 * require a lot of memory for storing the traces, making that infeasible.
 *
 * To remove traces from the trace pool that do not include a certain node
 * requires that the trace be "complete" before applying the filter. To
 * accomplish this, the trace pool is filtered upon each iteration of the
 * main vlib loop. Doing so keeps the number of allocated traces down to a
 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes. i.e. you can't
 * save away a buffer temporarily then inject it back into the graph and
 * expect that the trace_index is still valid (such as a traffic manager might
 * do). A new trace buffer should be allocated for those types of packets.
 *
 * The filter can be extended to support multiple nodes and other match
 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
 * a specified node is in the trace or not in the trace.
 */

void
trace_filter_set (u32 node_index, u32 flag, u32 count)
{
  foreach_vlib_main ()
    {
      vlib_trace_main_t *tm;

      tm = &this_vlib_main->trace_main;
      tm->filter_node_index = node_index;
      tm->filter_flag = flag;
      tm->filter_count = count;

      /*
       * Clear the trace limits to stop any in-progress tracing
       * Prevents runaway trace allocations when the filter changes
       * (or is removed)
       */
      vec_free (tm->nodes);
    }
}


static clib_error_t *
cli_filter_trace (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 filter_node_index;
  u32 filter_flag;
  u32 filter_count;

  if (unformat (input, "include %U %d",
		unformat_vlib_node, vm, &filter_node_index, &filter_count))
    {
      filter_flag = FILTER_FLAG_INCLUDE;
    }
  else if (unformat (input, "exclude %U %d",
		     unformat_vlib_node, vm, &filter_node_index,
		     &filter_count))
    {
      filter_flag = FILTER_FLAG_EXCLUDE;
    }
  else if (unformat (input, "none"))
    {
      filter_flag = FILTER_FLAG_NONE;
      filter_node_index = 0;
      filter_count = 0;
    }
  else
    return
      clib_error_create
      ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
       format_unformat_error, input);

  trace_filter_set (filter_node_index, filter_flag, filter_count);

  return 0;
}

VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "trace filter none | [include|exclude] NODE COUNT",
  .function = cli_filter_trace,
};

static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_stop_and_clear ();
  return 0;
}

VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};

/* Placeholder function to get us linked in. */
void
vlib_trace_cli_reference (void)
{
}

void *
vlib_add_trace (vlib_main_t * vm,
		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}
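
/*
 * Illustrative sketch of the usual per-node tracing pattern (hypothetical
 * node code, not part of this file): define a trace record, fill it with
 * vlib_add_trace() from the dispatch function, and let the node's
 * format_trace callback render it when format_vlib_trace() walks the
 * records.
 *
 *   typedef struct
 *   {
 *     u32 next_index;
 *     u32 sw_if_index;
 *   } my_trace_t;
 *
 *   if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 *     {
 *       my_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
 *       t->next_index = next0;
 *       t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
 *     }
 */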

vlib_is_packet_traced_fn_t *
vlib_is_packet_traced_function_from_name (const char *name)
{
  vlib_trace_filter_function_registration_t *reg =
    vlib_trace_filter_main.trace_filter_registration;
  while (reg)
    {
      if (clib_strcmp (reg->name, name) == 0)
	break;
      reg = reg->next;
    }
  if (!reg)
    return 0;
  return reg->function;
}

vlib_is_packet_traced_fn_t *
vlib_is_packet_traced_default_function ()
{
  vlib_trace_filter_function_registration_t *reg =
    vlib_trace_filter_main.trace_filter_registration;
  vlib_trace_filter_function_registration_t *tmp_reg = reg;
  while (reg)
    {
      if (reg->priority > tmp_reg->priority)
	tmp_reg = reg;
      reg = reg->next;
    }
  return tmp_reg->function;
}

static clib_error_t *
vlib_trace_filter_function_init (vlib_main_t *vm)
{
  vlib_is_packet_traced_fn_t *default_fn =
    vlib_is_packet_traced_default_function ();
  foreach_vlib_main ()
    {
      vlib_trace_main_t *tm = &this_vlib_main->trace_main;
      tm->current_trace_filter_function = default_fn;
    }
  return 0;
}

vlib_trace_filter_main_t vlib_trace_filter_main;

VLIB_INIT_FUNCTION (vlib_trace_filter_function_init);

static clib_error_t *
show_trace_filter_function (vlib_main_t *vm, unformat_input_t *input,
			    vlib_cli_command_t *cmd)
{
  vlib_trace_filter_main_t *tfm = &vlib_trace_filter_main;
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_is_packet_traced_fn_t *current_trace_filter_fn =
    tm->current_trace_filter_function;
  vlib_trace_filter_function_registration_t *reg =
    tfm->trace_filter_registration;

  while (reg)
    {
      vlib_cli_output (vm, "%sname:%s description: %s priority: %u",
		       reg->function == current_trace_filter_fn ? "(*) " : "",
		       reg->name, reg->description, reg->priority);
      reg = reg->next;
    }
  return 0;
}

VLIB_CLI_COMMAND (show_trace_filter_function_cli, static) = {
  .path = "show trace filter function",
  .short_help = "show trace filter function",
  .function = show_trace_filter_function,
};

uword
unformat_vlib_trace_filter_function (unformat_input_t *input, va_list *args)
{
  vlib_is_packet_traced_fn_t **res =
    va_arg (*args, vlib_is_packet_traced_fn_t **);
  vlib_trace_filter_main_t *tfm = &vlib_trace_filter_main;

  vlib_trace_filter_function_registration_t *reg =
    tfm->trace_filter_registration;
  while (reg)
    {
      if (unformat (input, reg->name))
	{
	  *res = reg->function;
	  return 1;
	}
      reg = reg->next;
    }
  return 0;
}

void
vlib_set_trace_filter_function (vlib_is_packet_traced_fn_t *x)
{
  foreach_vlib_main ()
    {
      this_vlib_main->trace_main.current_trace_filter_function = x;
    }
}

static clib_error_t *
set_trace_filter_function (vlib_main_t *vm, unformat_input_t *input,
			   vlib_cli_command_t *cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_is_packet_traced_fn_t *res = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U", unformat_vlib_trace_filter_function,
		    &res))
	;
      else
	{
	  error = clib_error_create (
	    "expected valid trace filter function, got `%U'",
	    format_unformat_error, line_input);
	  goto done;
	}
    }
  vlib_set_trace_filter_function (res);

done:
  unformat_free (line_input);

  return error;
}

VLIB_CLI_COMMAND (set_trace_filter_function_cli, static) = {
  .path = "set trace filter function",
  .short_help = "set trace filter function <func_name>",
  .function = set_trace_filter_function,
};
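
/*
 * Illustrative sketch: a plugin can make its own packet-classification
 * function selectable by the CLI above by linking a
 * vlib_trace_filter_function_registration_t onto
 * vlib_trace_filter_main.trace_filter_registration (in-tree code typically
 * does this with a registration macro from vlib/trace.h).  All names below
 * are hypothetical.
 *
 *   static vlib_trace_filter_function_registration_t my_filter_reg = {
 *     .name = "my-filter",
 *     .description = "example trace filter",
 *     .priority = 10,
 *     .function = my_is_packet_traced_fn,   // a vlib_is_packet_traced_fn_t
 *   };
 *
 *   static clib_error_t *
 *   my_filter_init (vlib_main_t * vm)
 *   {
 *     vlib_trace_filter_main_t *tfm = &vlib_trace_filter_main;
 *     my_filter_reg.next = tfm->trace_filter_registration;
 *     tfm->trace_filter_registration = &my_filter_reg;
 *     return 0;
 *   }
 *
 *   VLIB_INIT_FUNCTION (my_filter_init);
 */
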
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */