blob: f90f275fa87a9a0d03d1f3255059731a5c6f3f13 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * trace.c: VLIB trace buffer.
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vlib/vlib.h>
41#include <vlib/threads.h>
42
/* Scratch trace-data vector; validated to 2048 bytes on first "trace add"
   (see cli_add_trace_buffer).  Weak consumers elsewhere may reference it. */
u8 *vnet_trace_placeholder;
Dave Barachf8b85862018-08-17 18:29:07 -040044
Ed Warnickecb9cada2015-12-08 15:45:58 -070045/* Helper function for nodes which only trace buffer data. */
46void
47vlib_trace_frame_buffers_only (vlib_main_t * vm,
48 vlib_node_runtime_t * node,
49 u32 * buffers,
50 uword n_buffers,
51 uword next_buffer_stride,
52 uword n_buffer_data_bytes_in_trace)
53{
Dave Barach9b8ffd92016-07-08 08:13:45 -040054 u32 n_left, *from;
Ed Warnickecb9cada2015-12-08 15:45:58 -070055
56 n_left = n_buffers;
57 from = buffers;
Dave Barach9b8ffd92016-07-08 08:13:45 -040058
Ed Warnickecb9cada2015-12-08 15:45:58 -070059 while (n_left >= 4)
60 {
61 u32 bi0, bi1;
Dave Barach9b8ffd92016-07-08 08:13:45 -040062 vlib_buffer_t *b0, *b1;
63 u8 *t0, *t1;
Ed Warnickecb9cada2015-12-08 15:45:58 -070064
65 /* Prefetch next iteration. */
66 vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
67 vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
68
69 bi0 = from[0];
70 bi1 = from[1];
71
72 b0 = vlib_get_buffer (vm, bi0);
73 b1 = vlib_get_buffer (vm, bi1);
74
75 if (b0->flags & VLIB_BUFFER_IS_TRACED)
76 {
77 t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -050078 clib_memcpy_fast (t0, b0->data + b0->current_data,
79 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -070080 }
81 if (b1->flags & VLIB_BUFFER_IS_TRACED)
82 {
83 t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -050084 clib_memcpy_fast (t1, b1->data + b1->current_data,
85 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -070086 }
87 from += 2;
88 n_left -= 2;
89 }
90
91 while (n_left >= 1)
92 {
93 u32 bi0;
Dave Barach9b8ffd92016-07-08 08:13:45 -040094 vlib_buffer_t *b0;
95 u8 *t0;
Ed Warnickecb9cada2015-12-08 15:45:58 -070096
97 bi0 = from[0];
98
99 b0 = vlib_get_buffer (vm, bi0);
100
101 if (b0->flags & VLIB_BUFFER_IS_TRACED)
102 {
103 t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -0500104 clib_memcpy_fast (t0, b0->data + b0->current_data,
105 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700106 }
107 from += 1;
108 n_left -= 1;
109 }
110}
111
/* Free up all trace buffer memory. */
always_inline void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  /* *INDENT-OFF* */
  /* Pass 1: disable tracing on every thread before touching the pools. */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    tm->trace_enable = 0;
    vec_free (tm->nodes);
  }));

  /* Pass 2: free each per-thread trace record, then the pool itself. */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
      if (! pool_is_free_index (tm->trace_buffer_pool, i))
        vec_free (tm->trace_buffer_pool[i]);
    pool_free (tm->trace_buffer_pool);
  }));
  /* *INDENT-ON* */
}
139
Dave Barach1201a802018-11-20 12:08:39 -0500140u8 *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400141format_vlib_trace (u8 * s, va_list * va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700142{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400143 vlib_main_t *vm = va_arg (*va, vlib_main_t *);
144 vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
145 vlib_trace_header_t *e = vec_end (h);
146 vlib_node_t *node, *prev_node;
147 clib_time_t *ct = &vm->clib_time;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700148 f64 t;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400149
Ed Warnickecb9cada2015-12-08 15:45:58 -0700150 prev_node = 0;
151 while (h < e)
152 {
153 node = vlib_get_node (vm, h->node_index);
154
155 if (node != prev_node)
156 {
Dave Barach9b8ffd92016-07-08 08:13:45 -0400157 t =
158 (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
159 s =
160 format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
161 node->name);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700162 }
163 prev_node = node;
164
165 if (node->format_trace)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400166 s = format (s, "\n %U", node->format_trace, vm, node, h->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700167 else
Dave Barach9b8ffd92016-07-08 08:13:45 -0400168 s = format (s, "\n %U", node->format_buffer, h->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700169
170 h = vlib_trace_header_next (h);
171 }
172
173 return s;
174}
175
/* Root of all trace cli commands. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700183
Jon Loeligerc0b19542020-05-11 08:43:51 -0500184int
185trace_time_cmp (void *a1, void *a2)
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500186{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400187 vlib_trace_header_t **t1 = a1;
188 vlib_trace_header_t **t2 = a2;
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500189 i64 dt = t1[0]->time - t2[0]->time;
190 return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
191}
192
Bud Grise0bcc9d52016-02-02 14:23:29 -0500193/*
194 * Return 1 if this packet passes the trace filter, or 0 otherwise
195 */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400196u32
197filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500198{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400199 vlib_trace_header_t *e = vec_end (h);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500200
Dave Barach9b8ffd92016-07-08 08:13:45 -0400201 if (tm->filter_flag == 0)
202 return 1;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500203
Dave Barach27d978c2020-11-03 09:59:06 -0500204 /*
205 * When capturing a post-mortem dispatch trace,
206 * toss all existing traces once per dispatch cycle.
207 * So we can trace 4 billion pkts without running out of
208 * memory...
209 */
210 if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
211 return 0;
212
Bud Grise0bcc9d52016-02-02 14:23:29 -0500213 if (tm->filter_flag == FILTER_FLAG_INCLUDE)
214 {
215 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400216 {
217 if (h->node_index == tm->filter_node_index)
218 return 1;
219 h = vlib_trace_header_next (h);
220 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500221 return 0;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400222 }
223 else /* FILTER_FLAG_EXCLUDE */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500224 {
225 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400226 {
227 if (h->node_index == tm->filter_node_index)
228 return 0;
229 h = vlib_trace_header_next (h);
230 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500231 return 1;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400232 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500233
234 return 0;
235}
236
/*
 * Remove traces from the trace buffer pool that don't pass the filter
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  /* *INDENT-OFF* */
  /* Collect rejects, plus anything beyond the filter_count quota. */
  pool_foreach (h, tm->trace_buffer_pool,
  ({
    accept = filter_accept(tm, h[0]);

    if ((n_accepted == tm->filter_count) || !accept)
      vec_add1 (traces_to_remove, h);
    else
      n_accepted++;
  }));
  /* *INDENT-ON* */

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      /* Pool handle minus pool base yields the pool index. */
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      /* Truncate (not free) the trace vector so the slot can be reused. */
      _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}
282
/* CLI: "show trace [max COUNT]" -- print per-thread packet traces,
   sorted by capture time, after applying any configured filter. */
static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
	;
      else
	return clib_error_create ("expected 'max COUNT', got `%U'",
				  format_unformat_error, input);
    }


  /* Get active traces from pool. */

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    fmt = "------------------- Start of thread %d %s -------------------\n";
    s = format (s, fmt, index, vlib_worker_threads[index].name);

    tm = &this_vlib_main->trace_main;

    /* Drop traces that don't match the configured filter first. */
    trace_apply_filter(this_vlib_main);

    traces = 0;
    pool_foreach (h, tm->trace_buffer_pool,
    ({
      vec_add1 (traces, h[0]);
    }));

    if (vec_len (traces) == 0)
      {
        s = format (s, "No packets in trace buffer\n");
        goto done;
      }

    /* Sort them by increasing time. */
    vec_sort_with_function (traces, trace_time_cmp);

    for (i = 0; i < vec_len (traces); i++)
      {
        if (i == max)
          {
            vlib_cli_output (vm, "Limiting display to %d packets."
                                 " To display more specify max.", max);
            goto done;
          }

        s = format (s, "Packet %d\n%U\n\n", i + 1,
                    format_vlib_trace, vm, traces[i]);
      }

  done:
    vec_free (traces);

    index++;
  }));
  /* *INDENT-ON* */

  /* Emit everything in one shot so per-thread sections stay contiguous. */
  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}
360
/* *INDENT-OFF* */
/* CLI registration: "show trace" (handler: cli_show_trace_buffer). */
VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700368
/* Weak stub so vlib links without the classify-based packet trace
   filter; a strong definition elsewhere overrides this at link time. */
int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));

int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  /* Stub: nothing to enable/disable; 0 = success (see cli_add_trace_buffer). */
  return 0;
}
376
/* Stop tracing and release all trace memory on every thread. */
void
vlib_trace_stop_and_clear (void)
{
  vlib_enable_disable_pkt_trace_filter (0);	/* disable tracing */
  clear_trace_buffer ();
}
383
384
/* Raise the per-node trace limit by 'add' packets on every thread,
   then (re)enable tracing.  add == ~0 means "no count given". */
void
trace_update_capture_options (u32 add, u32 node_index, u32 filter, u8 verbose)
{
  vlib_trace_main_t *tm;
  vlib_trace_node_t *tn;

  /* NOTE(review): 'filter' is accepted but never read in this body --
     confirm whether callers rely on it being applied here. */

  if (add == ~0)
    add = 50;

  /* *INDENT-OFF* */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;

      /*
       * Adding 0 makes no real sense, and there was no other way
       * to explicitly zero-out the limits and count, so make
       * an "add 0" request really be "set to 0".
       */
      if (add == 0)
	tn->limit = tn->count = 0;
      else
	tn->limit += add;
    }));

  /* Enable tracing on all threads only after the limits are in place. */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->trace_enable = 1;
    }));
  /* *INDENT-ON* */
}
420
/* CLI: "trace add NODE COUNT [filter] [verbose]" -- arm packet tracing
   at the given input-graph node for COUNT additional packets. */
static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
		      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_node_t *node;
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  /* One-time allocation of the shared scratch trace-data vector. */
  if (vnet_trace_placeholder == 0)
    vec_validate_aligned (vnet_trace_placeholder, 2048,
			  CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
		    unformat_vlib_node, vm, &node_index, &add))
	;
      else if (unformat (line_input, "verbose"))
	verbose = 1;
      else if (unformat (line_input, "filter"))
	filter = 1;
      else
	{
	  error = clib_error_create ("expected NODE COUNT, got `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  node = vlib_get_node (vm, node_index);

  /* Only nodes that declare trace support may be armed this way. */
  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
				 "tracing. There may be another way to "
				 "initiate trace on this node.",
				 format_vlib_node_name, vm, node_index);
      goto done;
    }

  if (filter)
    {
      /* Non-zero return: no classify-based filter has been configured. */
      if (vlib_enable_disable_pkt_trace_filter (1 /* enable */ ))
	{
	  error = clib_error_create ("No packet trace filter configured...");
	  goto done;
	}
    }

  trace_update_capture_options (add, node_index, filter, verbose);

done:
  unformat_free (line_input);

  return error;
}
483
/* *INDENT-OFF* */
/* CLI registration: "trace add" (handler: cli_add_trace_buffer). */
VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "trace add <input-graph-node> <add'l-pkts-for-node-> [filter] [verbose]",
  .function = cli_add_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700491
Bud Grise0bcc9d52016-02-02 14:23:29 -0500492/*
493 * Configure a filter for packet traces.
494 *
495 * This supplements the packet trace feature so that only packets matching
496 * the filter are included in the trace. Currently the only filter is to
497 * keep packets that include a certain node in the trace or exclude a certain
498 * node in the trace.
499 *
500 * The count of traced packets in the "trace add" command is still used to
501 * create a certain number of traces. The "trace filter" command specifies
502 * how many of those packets should be retained in the trace.
503 *
504 * For example, 1Mpps of traffic is arriving and one of those packets is being
505 * dropped. To capture the trace for only that dropped packet, you can do:
506 * trace filter include error-drop 1
507 * trace add dpdk-input 1000000
508 * <wait one second>
509 * show trace
510 *
511 * Note that the filter could be implemented by capturing all traces and just
512 * reducing traces displayed by the "show trace" function. But that would
513 * require a lot of memory for storing the traces, making that infeasible.
514 *
515 * To remove traces from the trace pool that do not include a certain node
516 * requires that the trace be "complete" before applying the filter. To
 * accomplish this, the trace pool is filtered upon each iteration of the
518 * main vlib loop. Doing so keeps the number of allocated traces down to a
519 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes. i.e. you can't
521 * save away a buffer temporarily then inject it back into the graph and
522 * expect that the trace_index is still valid (such as a traffic manager might
523 * do). A new trace buffer should be allocated for those types of packets.
524 *
525 * The filter can be extended to support multiple nodes and other match
526 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
527 * a specified node is in the trace or not in the trace.
528 */
Jon Loeligerc0b19542020-05-11 08:43:51 -0500529
/* Install a packet-trace filter (node + include/exclude flag + count)
   on every thread's trace main. */
void
trace_filter_set (u32 node_index, u32 flag, u32 count)
{
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    vlib_trace_main_t *tm;

    tm = &this_vlib_main->trace_main;
    tm->filter_node_index = node_index;
    tm->filter_flag = flag;
    tm->filter_count = count;

    /*
     * Clear the trace limits to stop any in-progress tracing
     * Prevents runaway trace allocations when the filter changes
     * (or is removed)
     */
    vec_free (tm->nodes);
  }));
  /* *INDENT-ON* */
}
552
553
Bud Grise0bcc9d52016-02-02 14:23:29 -0500554static clib_error_t *
555cli_filter_trace (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400556 unformat_input_t * input, vlib_cli_command_t * cmd)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500557{
Bud Grise0bcc9d52016-02-02 14:23:29 -0500558 u32 filter_node_index;
559 u32 filter_flag;
560 u32 filter_count;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500561
562 if (unformat (input, "include %U %d",
Dave Barach9b8ffd92016-07-08 08:13:45 -0400563 unformat_vlib_node, vm, &filter_node_index, &filter_count))
Bud Grise0bcc9d52016-02-02 14:23:29 -0500564 {
565 filter_flag = FILTER_FLAG_INCLUDE;
566 }
567 else if (unformat (input, "exclude %U %d",
Dave Barach9b8ffd92016-07-08 08:13:45 -0400568 unformat_vlib_node, vm, &filter_node_index,
569 &filter_count))
Bud Grise0bcc9d52016-02-02 14:23:29 -0500570 {
571 filter_flag = FILTER_FLAG_EXCLUDE;
572 }
573 else if (unformat (input, "none"))
574 {
575 filter_flag = FILTER_FLAG_NONE;
576 filter_node_index = 0;
577 filter_count = 0;
578 }
579 else
Dave Barach9b8ffd92016-07-08 08:13:45 -0400580 return
581 clib_error_create
582 ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
583 format_unformat_error, input);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500584
Jon Loeligerc0b19542020-05-11 08:43:51 -0500585 trace_filter_set (filter_node_index, filter_flag, filter_count);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500586
587 return 0;
588}
589
/* *INDENT-OFF* */
/* CLI registration: "trace filter" (handler: cli_filter_trace). */
VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "trace filter none | [include|exclude] NODE COUNT",
  .function = cli_filter_trace,
};
/* *INDENT-ON* */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500597
/* CLI: "clear trace" -- stop tracing and free all trace memory. */
static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_stop_and_clear ();
  return 0;
}
605
/* *INDENT-OFF* */
/* CLI registration: "clear trace" (handler: cli_clear_trace_buffer). */
VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700613
/* Placeholder function to get us linked in.  Called (for its side effect
   of pulling this translation unit into the image) -- intentionally empty. */
void
vlib_trace_cli_reference (void)
{
}
619
/* Weak stub for the classify-based per-packet trace predicate; a strong
   definition elsewhere overrides it.  Reaching this body means the real
   filter was requested but never linked in. */
int
vnet_is_packet_traced (vlib_buffer_t * b,
		       u32 classify_table_index, int func)
__attribute__ ((weak));

int
vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
{
  clib_warning ("BUG: STUB called");
  return 1;			/* fail open: trace the packet */
}
631
/* Out-of-line wrapper around vlib_add_trace_inline: allocate an
   n_data_bytes trace record for buffer b at node r and return a
   pointer to its data area. */
void *
vlib_add_trace (vlib_main_t * vm,
		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}
638
639
640
Dave Barach9b8ffd92016-07-08 08:13:45 -0400641/*
642 * fd.io coding-style-patch-verification: ON
643 *
644 * Local Variables:
645 * eval: (c-set-style "gnu")
646 * End:
647 */