blob: 4bbd9505b71a34f0254a5d49025efc8730551268 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * trace.c: VLIB trace buffer.
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vlib/vlib.h>
41#include <vlib/threads.h>
Jon Loeliger5c1e48c2020-10-15 14:41:36 -040042#include <vnet/classify/vnet_classify.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070043
Dave Barach11fb09e2020-08-06 12:10:09 -040044u8 *vnet_trace_placeholder;
Dave Barachf8b85862018-08-17 18:29:07 -040045
Ed Warnickecb9cada2015-12-08 15:45:58 -070046/* Helper function for nodes which only trace buffer data. */
47void
48vlib_trace_frame_buffers_only (vlib_main_t * vm,
49 vlib_node_runtime_t * node,
50 u32 * buffers,
51 uword n_buffers,
52 uword next_buffer_stride,
53 uword n_buffer_data_bytes_in_trace)
54{
Dave Barach9b8ffd92016-07-08 08:13:45 -040055 u32 n_left, *from;
Ed Warnickecb9cada2015-12-08 15:45:58 -070056
57 n_left = n_buffers;
58 from = buffers;
Dave Barach9b8ffd92016-07-08 08:13:45 -040059
Ed Warnickecb9cada2015-12-08 15:45:58 -070060 while (n_left >= 4)
61 {
62 u32 bi0, bi1;
Dave Barach9b8ffd92016-07-08 08:13:45 -040063 vlib_buffer_t *b0, *b1;
64 u8 *t0, *t1;
Ed Warnickecb9cada2015-12-08 15:45:58 -070065
66 /* Prefetch next iteration. */
67 vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
68 vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
69
70 bi0 = from[0];
71 bi1 = from[1];
72
73 b0 = vlib_get_buffer (vm, bi0);
74 b1 = vlib_get_buffer (vm, bi1);
75
76 if (b0->flags & VLIB_BUFFER_IS_TRACED)
77 {
78 t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -050079 clib_memcpy_fast (t0, b0->data + b0->current_data,
80 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -070081 }
82 if (b1->flags & VLIB_BUFFER_IS_TRACED)
83 {
84 t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -050085 clib_memcpy_fast (t1, b1->data + b1->current_data,
86 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -070087 }
88 from += 2;
89 n_left -= 2;
90 }
91
92 while (n_left >= 1)
93 {
94 u32 bi0;
Dave Barach9b8ffd92016-07-08 08:13:45 -040095 vlib_buffer_t *b0;
96 u8 *t0;
Ed Warnickecb9cada2015-12-08 15:45:58 -070097
98 bi0 = from[0];
99
100 b0 = vlib_get_buffer (vm, bi0);
101
102 if (b0->flags & VLIB_BUFFER_IS_TRACED)
103 {
104 t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
Dave Barach178cf492018-11-13 16:34:13 -0500105 clib_memcpy_fast (t0, b0->data + b0->current_data,
106 n_buffer_data_bytes_in_trace);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700107 }
108 from += 1;
109 n_left -= 1;
110 }
111}
112
113/* Free up all trace buffer memory. */
Jon Loeliger5c1e48c2020-10-15 14:41:36 -0400114void
Bud Grise0bcc9d52016-02-02 14:23:29 -0500115clear_trace_buffer (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700116{
117 int i;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400118 vlib_trace_main_t *tm;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700119
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100120 foreach_vlib_main ()
121 {
122 tm = &this_vlib_main->trace_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700123
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100124 tm->trace_enable = 0;
125 vec_free (tm->nodes);
126 }
Jon Loeligerc0b19542020-05-11 08:43:51 -0500127
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100128 foreach_vlib_main ()
129 {
130 tm = &this_vlib_main->trace_main;
Bud Grised56a6f52016-02-19 12:10:33 -0500131
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100132 for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
133 if (!pool_is_free_index (tm->trace_buffer_pool, i))
134 vec_free (tm->trace_buffer_pool[i]);
135 pool_free (tm->trace_buffer_pool);
136 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700137}
138
Dave Barach1201a802018-11-20 12:08:39 -0500139u8 *
Dave Barach9b8ffd92016-07-08 08:13:45 -0400140format_vlib_trace (u8 * s, va_list * va)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700141{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400142 vlib_main_t *vm = va_arg (*va, vlib_main_t *);
143 vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
144 vlib_trace_header_t *e = vec_end (h);
145 vlib_node_t *node, *prev_node;
146 clib_time_t *ct = &vm->clib_time;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700147 f64 t;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400148
Ed Warnickecb9cada2015-12-08 15:45:58 -0700149 prev_node = 0;
150 while (h < e)
151 {
152 node = vlib_get_node (vm, h->node_index);
153
154 if (node != prev_node)
155 {
Dave Barach9b8ffd92016-07-08 08:13:45 -0400156 t =
157 (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
158 s =
159 format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
160 node->name);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700161 }
162 prev_node = node;
163
164 if (node->format_trace)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400165 s = format (s, "\n %U", node->format_trace, vm, node, h->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700166 else
Dave Barach9b8ffd92016-07-08 08:13:45 -0400167 s = format (s, "\n %U", node->format_buffer, h->data);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700168
169 h = vlib_trace_header_next (h);
170 }
171
172 return s;
173}
174
175/* Root of all trace cli commands. */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400176/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700177VLIB_CLI_COMMAND (trace_cli_command,static) = {
178 .path = "trace",
179 .short_help = "Packet tracer commands",
180};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400181/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700182
Jon Loeligerc0b19542020-05-11 08:43:51 -0500183int
184trace_time_cmp (void *a1, void *a2)
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500185{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400186 vlib_trace_header_t **t1 = a1;
187 vlib_trace_header_t **t2 = a2;
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500188 i64 dt = t1[0]->time - t2[0]->time;
189 return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
190}
191
Bud Grise0bcc9d52016-02-02 14:23:29 -0500192/*
193 * Return 1 if this packet passes the trace filter, or 0 otherwise
194 */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400195u32
196filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500197{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400198 vlib_trace_header_t *e = vec_end (h);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500199
Dave Barach9b8ffd92016-07-08 08:13:45 -0400200 if (tm->filter_flag == 0)
201 return 1;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500202
Dave Barach27d978c2020-11-03 09:59:06 -0500203 /*
204 * When capturing a post-mortem dispatch trace,
205 * toss all existing traces once per dispatch cycle.
206 * So we can trace 4 billion pkts without running out of
207 * memory...
208 */
209 if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
210 return 0;
211
Bud Grise0bcc9d52016-02-02 14:23:29 -0500212 if (tm->filter_flag == FILTER_FLAG_INCLUDE)
213 {
214 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400215 {
216 if (h->node_index == tm->filter_node_index)
217 return 1;
218 h = vlib_trace_header_next (h);
219 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500220 return 0;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400221 }
222 else /* FILTER_FLAG_EXCLUDE */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500223 {
224 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400225 {
226 if (h->node_index == tm->filter_node_index)
227 return 0;
228 h = vlib_trace_header_next (h);
229 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500230 return 1;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400231 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500232
233 return 0;
234}
235
236/*
237 * Remove traces from the trace buffer pool that don't pass the filter
238 */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400239void
240trace_apply_filter (vlib_main_t * vm)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500241{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400242 vlib_trace_main_t *tm = &vm->trace_main;
243 vlib_trace_header_t **h;
244 vlib_trace_header_t ***traces_to_remove = 0;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500245 u32 index;
246 u32 trace_index;
247 u32 n_accepted;
248
249 u32 accept;
250
251 if (tm->filter_flag == FILTER_FLAG_NONE)
252 return;
253
254 /*
255 * Ideally we would retain the first N traces that pass the filter instead
256 * of any N traces.
257 */
258 n_accepted = 0;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400259 /* *INDENT-OFF* */
Damjan Marionb2c31b62020-12-13 21:47:40 +0100260 pool_foreach (h, tm->trace_buffer_pool)
261 {
Bud Grise0bcc9d52016-02-02 14:23:29 -0500262 accept = filter_accept(tm, h[0]);
263
264 if ((n_accepted == tm->filter_count) || !accept)
265 vec_add1 (traces_to_remove, h);
266 else
267 n_accepted++;
Damjan Marionb2c31b62020-12-13 21:47:40 +0100268 }
Dave Barach9b8ffd92016-07-08 08:13:45 -0400269 /* *INDENT-ON* */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500270
271 /* remove all traces that we don't want to keep */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400272 for (index = 0; index < vec_len (traces_to_remove); index++)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500273 {
274 trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
275 _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
276 pool_put_index (tm->trace_buffer_pool, trace_index);
277 }
278
279 vec_free (traces_to_remove);
280}
281
Ed Warnickecb9cada2015-12-08 15:45:58 -0700282static clib_error_t *
283cli_show_trace_buffer (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400284 unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700285{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400286 vlib_trace_main_t *tm;
287 vlib_trace_header_t **h, **traces;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700288 u32 i, index = 0;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400289 char *fmt;
290 u8 *s = 0;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500291 u32 max;
292
293 /*
294 * By default display only this many traces. To display more, explicitly
295 * specify a max. This prevents unexpectedly huge outputs.
296 */
297 max = 50;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400298 while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500299 {
300 if (unformat (input, "max %d", &max))
Dave Barach9b8ffd92016-07-08 08:13:45 -0400301 ;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500302 else
Dave Barach9b8ffd92016-07-08 08:13:45 -0400303 return clib_error_create ("expected 'max COUNT', got `%U'",
304 format_unformat_error, input);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500305 }
306
Ed Warnickecb9cada2015-12-08 15:45:58 -0700307
308 /* Get active traces from pool. */
309
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100310 foreach_vlib_main ()
311 {
312 fmt = "------------------- Start of thread %d %s -------------------\n";
313 s = format (s, fmt, index, vlib_worker_threads[index].name);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700314
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100315 tm = &this_vlib_main->trace_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700316
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100317 trace_apply_filter (this_vlib_main);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500318
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100319 traces = 0;
320 pool_foreach (h, tm->trace_buffer_pool)
321 {
322 vec_add1 (traces, h[0]);
323 }
324
325 if (vec_len (traces) == 0)
326 {
327 s = format (s, "No packets in trace buffer\n");
328 goto done;
329 }
330
331 /* Sort them by increasing time. */
332 vec_sort_with_function (traces, trace_time_cmp);
333
334 for (i = 0; i < vec_len (traces); i++)
335 {
336 if (i == max)
337 {
338 char *warn = "Limiting display to %d packets."
339 " To display more specify max.";
340 vlib_cli_output (vm, warn, max);
341 s = format (s, warn, max);
342 goto done;
343 }
344
345 s = format (s, "Packet %d\n%U\n\n", i + 1, format_vlib_trace, vm,
346 traces[i]);
347 }
348
349 done:
350 vec_free (traces);
351
352 index++;
Damjan Marionb2c31b62020-12-13 21:47:40 +0100353 }
Dave Barach9b8ffd92016-07-08 08:13:45 -0400354
Klement Sekera29396e62016-12-21 03:24:00 +0100355 vlib_cli_output (vm, "%v", s);
Ed Warnicke81aa0e52015-12-22 18:55:08 -0700356 vec_free (s);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700357 return 0;
358}
359
Dave Barach9b8ffd92016-07-08 08:13:45 -0400360/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700361VLIB_CLI_COMMAND (show_trace_cli,static) = {
362 .path = "show trace",
Bud Grise0bcc9d52016-02-02 14:23:29 -0500363 .short_help = "Show trace buffer [max COUNT]",
Ed Warnickecb9cada2015-12-08 15:45:58 -0700364 .function = cli_show_trace_buffer,
365};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400366/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700367
/*
 * Weak no-op stub; a strong definition elsewhere (classifier-based
 * packet-trace filtering) replaces it when linked in.
 */
int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));

int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  return 0;
}
375
/* Disable packet-trace filtering, then release all trace memory. */
void
vlib_trace_stop_and_clear (void)
{
  vlib_enable_disable_pkt_trace_filter (0);	/* disable tracing */
  clear_trace_buffer ();
}
382
383
384void
385trace_update_capture_options (u32 add, u32 node_index, u32 filter, u8 verbose)
386{
387 vlib_trace_main_t *tm;
388 vlib_trace_node_t *tn;
389
390 if (add == ~0)
391 add = 50;
392
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100393 foreach_vlib_main ()
Jon Loeligerc0b19542020-05-11 08:43:51 -0500394 {
395 tm = &this_vlib_main->trace_main;
396 tm->verbose = verbose;
397 vec_validate (tm->nodes, node_index);
398 tn = tm->nodes + node_index;
399
400 /*
401 * Adding 0 makes no real sense, and there wa no other way
402 * to explicilty zero-out the limits and count, so make
403 * an "add 0" request really be "set to 0".
404 */
405 if (add == 0)
406 tn->limit = tn->count = 0;
407 else
408 tn->limit += add;
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100409 }
Jon Loeligerc0b19542020-05-11 08:43:51 -0500410
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100411 foreach_vlib_main ()
Jon Loeligerc0b19542020-05-11 08:43:51 -0500412 {
413 tm = &this_vlib_main->trace_main;
414 tm->trace_enable = 1;
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100415 }
Jon Loeliger5c1e48c2020-10-15 14:41:36 -0400416
417 vlib_enable_disable_pkt_trace_filter (! !filter);
Jon Loeligerc0b19542020-05-11 08:43:51 -0500418}
419
Ed Warnickecb9cada2015-12-08 15:45:58 -0700420static clib_error_t *
421cli_add_trace_buffer (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400422 unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700423{
Neale Ranns3ee44042016-10-03 13:05:48 +0100424 unformat_input_t _line_input, *line_input = &_line_input;
Damjan Marion7ca5aaa2019-09-24 18:10:49 +0200425 vlib_node_t *node;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700426 u32 node_index, add;
Damjan Mariondb7b2692016-06-09 16:16:27 +0200427 u8 verbose = 0;
Dave Barach87d24db2019-12-04 17:19:12 -0500428 int filter = 0;
Billy McFalla9a20e72017-02-15 11:39:12 -0500429 clib_error_t *error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700430
Neale Ranns3ee44042016-10-03 13:05:48 +0100431 if (!unformat_user (input, unformat_line_input, line_input))
432 return 0;
433
Dave Barach11fb09e2020-08-06 12:10:09 -0400434 if (vnet_trace_placeholder == 0)
435 vec_validate_aligned (vnet_trace_placeholder, 2048,
436 CLIB_CACHE_LINE_BYTES);
Dave Barachf8b85862018-08-17 18:29:07 -0400437
Neale Ranns3ee44042016-10-03 13:05:48 +0100438 while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
Damjan Mariondb7b2692016-06-09 16:16:27 +0200439 {
Neale Ranns3ee44042016-10-03 13:05:48 +0100440 if (unformat (line_input, "%U %d",
441 unformat_vlib_node, vm, &node_index, &add))
Damjan Mariondb7b2692016-06-09 16:16:27 +0200442 ;
Neale Ranns3ee44042016-10-03 13:05:48 +0100443 else if (unformat (line_input, "verbose"))
Damjan Mariondb7b2692016-06-09 16:16:27 +0200444 verbose = 1;
Dave Barach87d24db2019-12-04 17:19:12 -0500445 else if (unformat (line_input, "filter"))
446 filter = 1;
Damjan Mariondb7b2692016-06-09 16:16:27 +0200447 else
Billy McFalla9a20e72017-02-15 11:39:12 -0500448 {
449 error = clib_error_create ("expected NODE COUNT, got `%U'",
450 format_unformat_error, line_input);
451 goto done;
452 }
Damjan Mariondb7b2692016-06-09 16:16:27 +0200453 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700454
Damjan Marion7ca5aaa2019-09-24 18:10:49 +0200455 node = vlib_get_node (vm, node_index);
456
457 if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
458 {
459 error = clib_error_create ("node '%U' doesn't support per-node "
460 "tracing. There may be another way to "
461 "initiate trace on this node.",
462 format_vlib_node_name, vm, node_index);
463 goto done;
464 }
465
Jon Loeliger5c1e48c2020-10-15 14:41:36 -0400466 u32 filter_table = classify_get_trace_chain ();
467 if (filter && filter_table == ~0)
Dave Barach87d24db2019-12-04 17:19:12 -0500468 {
Jon Loeliger5c1e48c2020-10-15 14:41:36 -0400469 error = clib_error_create ("No packet trace filter configured...");
470 goto done;
Dave Barach87d24db2019-12-04 17:19:12 -0500471 }
472
Jon Loeligerc0b19542020-05-11 08:43:51 -0500473 trace_update_capture_options (add, node_index, filter, verbose);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700474
Billy McFalla9a20e72017-02-15 11:39:12 -0500475done:
476 unformat_free (line_input);
477
478 return error;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700479}
480
Dave Barach9b8ffd92016-07-08 08:13:45 -0400481/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700482VLIB_CLI_COMMAND (add_trace_cli,static) = {
483 .path = "trace add",
Jon Loeligerc0b19542020-05-11 08:43:51 -0500484 .short_help = "trace add <input-graph-node> <add'l-pkts-for-node-> [filter] [verbose]",
Ed Warnickecb9cada2015-12-08 15:45:58 -0700485 .function = cli_add_trace_buffer,
486};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400487/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700488
Bud Grise0bcc9d52016-02-02 14:23:29 -0500489/*
490 * Configure a filter for packet traces.
491 *
492 * This supplements the packet trace feature so that only packets matching
493 * the filter are included in the trace. Currently the only filter is to
494 * keep packets that include a certain node in the trace or exclude a certain
495 * node in the trace.
496 *
497 * The count of traced packets in the "trace add" command is still used to
498 * create a certain number of traces. The "trace filter" command specifies
499 * how many of those packets should be retained in the trace.
500 *
501 * For example, 1Mpps of traffic is arriving and one of those packets is being
502 * dropped. To capture the trace for only that dropped packet, you can do:
503 * trace filter include error-drop 1
504 * trace add dpdk-input 1000000
505 * <wait one second>
506 * show trace
507 *
508 * Note that the filter could be implemented by capturing all traces and just
509 * reducing traces displayed by the "show trace" function. But that would
510 * require a lot of memory for storing the traces, making that infeasible.
511 *
512 * To remove traces from the trace pool that do not include a certain node
513 * requires that the trace be "complete" before applying the filter. To
 514 * accomplish this, the trace pool is filtered upon each iteration of the
515 * main vlib loop. Doing so keeps the number of allocated traces down to a
516 * reasonably low number. This requires that tracing for a buffer is not
 517 * performed after the vlib main loop iteration completes. i.e. you can't
518 * save away a buffer temporarily then inject it back into the graph and
519 * expect that the trace_index is still valid (such as a traffic manager might
520 * do). A new trace buffer should be allocated for those types of packets.
521 *
522 * The filter can be extended to support multiple nodes and other match
523 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
524 * a specified node is in the trace or not in the trace.
525 */
Jon Loeligerc0b19542020-05-11 08:43:51 -0500526
527void
528trace_filter_set (u32 node_index, u32 flag, u32 count)
529{
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100530 foreach_vlib_main ()
531 {
532 vlib_trace_main_t *tm;
Jon Loeligerc0b19542020-05-11 08:43:51 -0500533
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100534 tm = &this_vlib_main->trace_main;
535 tm->filter_node_index = node_index;
536 tm->filter_flag = flag;
537 tm->filter_count = count;
Jon Loeligerc0b19542020-05-11 08:43:51 -0500538
Damjan Marion92ccf9b2021-03-26 11:38:01 +0100539 /*
540 * Clear the trace limits to stop any in-progress tracing
541 * Prevents runaway trace allocations when the filter changes
542 * (or is removed)
543 */
544 vec_free (tm->nodes);
545 }
Jon Loeligerc0b19542020-05-11 08:43:51 -0500546}
547
548
Bud Grise0bcc9d52016-02-02 14:23:29 -0500549static clib_error_t *
550cli_filter_trace (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400551 unformat_input_t * input, vlib_cli_command_t * cmd)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500552{
Bud Grise0bcc9d52016-02-02 14:23:29 -0500553 u32 filter_node_index;
554 u32 filter_flag;
555 u32 filter_count;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500556
557 if (unformat (input, "include %U %d",
Dave Barach9b8ffd92016-07-08 08:13:45 -0400558 unformat_vlib_node, vm, &filter_node_index, &filter_count))
Bud Grise0bcc9d52016-02-02 14:23:29 -0500559 {
560 filter_flag = FILTER_FLAG_INCLUDE;
561 }
562 else if (unformat (input, "exclude %U %d",
Dave Barach9b8ffd92016-07-08 08:13:45 -0400563 unformat_vlib_node, vm, &filter_node_index,
564 &filter_count))
Bud Grise0bcc9d52016-02-02 14:23:29 -0500565 {
566 filter_flag = FILTER_FLAG_EXCLUDE;
567 }
568 else if (unformat (input, "none"))
569 {
570 filter_flag = FILTER_FLAG_NONE;
571 filter_node_index = 0;
572 filter_count = 0;
573 }
574 else
Dave Barach9b8ffd92016-07-08 08:13:45 -0400575 return
576 clib_error_create
577 ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
578 format_unformat_error, input);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500579
Jon Loeligerc0b19542020-05-11 08:43:51 -0500580 trace_filter_set (filter_node_index, filter_flag, filter_count);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500581
582 return 0;
583}
584
Dave Barach9b8ffd92016-07-08 08:13:45 -0400585/* *INDENT-OFF* */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500586VLIB_CLI_COMMAND (filter_trace_cli,static) = {
587 .path = "trace filter",
Jon Loeligerc0b19542020-05-11 08:43:51 -0500588 .short_help = "trace filter none | [include|exclude] NODE COUNT",
Bud Grise0bcc9d52016-02-02 14:23:29 -0500589 .function = cli_filter_trace,
590};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400591/* *INDENT-ON* */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500592
Ed Warnickecb9cada2015-12-08 15:45:58 -0700593static clib_error_t *
594cli_clear_trace_buffer (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400595 unformat_input_t * input, vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700596{
Jon Loeligerc0b19542020-05-11 08:43:51 -0500597 vlib_trace_stop_and_clear ();
Ed Warnickecb9cada2015-12-08 15:45:58 -0700598 return 0;
599}
600
Dave Barach9b8ffd92016-07-08 08:13:45 -0400601/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700602VLIB_CLI_COMMAND (clear_trace_cli,static) = {
603 .path = "clear trace",
604 .short_help = "Clear trace buffer and free memory",
605 .function = cli_clear_trace_buffer,
606};
Dave Barach9b8ffd92016-07-08 08:13:45 -0400607/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700608
/* Placeholder function to get us linked in. */
void
vlib_trace_cli_reference (void)
{
  /* intentionally empty */
}
614
Dave Barach87d24db2019-12-04 17:19:12 -0500615int
616vnet_is_packet_traced (vlib_buffer_t * b,
617 u32 classify_table_index, int func)
618__attribute__ ((weak));
619
620int
621vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
622{
623 clib_warning ("BUG: STUB called");
624 return 1;
625}
626
Dave Barach0a67b482020-06-08 11:17:19 -0400627void *
628vlib_add_trace (vlib_main_t * vm,
629 vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
630{
631 return vlib_add_trace_inline (vm, r, b, n_data_bytes);
632}
633
634
635
Dave Barach9b8ffd92016-07-08 08:13:45 -0400636/*
637 * fd.io coding-style-patch-verification: ON
638 *
639 * Local Variables:
640 * eval: (c-set-style "gnu")
641 * End:
642 */