/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * trace.c: VLIB trace buffer.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vnet/classify/vnet_classify.h>

u8 *vnet_trace_placeholder;

/* Helper function for nodes which only trace buffer data. */
void
vlib_trace_frame_buffers_only (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               u32 * buffers,
                               uword n_buffers,
                               uword next_buffer_stride,
                               uword n_buffer_data_bytes_in_trace)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = buffers;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      u8 *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
          clib_memcpy_fast (t0, b0->data + b0->current_data,
                            n_buffer_data_bytes_in_trace);
        }
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
        {
          t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
          clib_memcpy_fast (t1, b1->data + b1->current_data,
                            n_buffer_data_bytes_in_trace);
        }
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u8 *t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
          clib_memcpy_fast (t0, b0->data + b0->current_data,
                            n_buffer_data_bytes_in_trace);
        }
      from += 1;
      n_left -= 1;
    }
}

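/*
 * Usage sketch (illustrative only, not part of the original file): a node
 * whose per-packet trace record is just raw buffer data can call the helper
 * above once per frame from its dispatch function, here with a buffer
 * stride of 1 and the first 32 bytes of each traced packet.  The node
 * function name below is hypothetical.
 *
 *   static uword
 *   my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 *               vlib_frame_t * frame)
 *   {
 *     u32 *from = vlib_frame_vector_args (frame);
 *
 *     ... process the packets in 'from' ...
 *
 *     if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
 *       vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
 *                                      1, 32);
 *     return frame->n_vectors;
 *   }
 */
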
/* Free up all trace buffer memory. */
void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    tm->trace_enable = 0;
    vec_free (tm->nodes);
  }));

  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
      if (! pool_is_free_index (tm->trace_buffer_pool, i))
        vec_free (tm->trace_buffer_pool[i]);
    pool_free (tm->trace_buffer_pool);
  }));
  /* *INDENT-ON* */
}

u8 *
format_vlib_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
  vlib_trace_header_t *e = vec_end (h);
  vlib_node_t *node, *prev_node;
  clib_time_t *ct = &vm->clib_time;
  f64 t;

  prev_node = 0;
  while (h < e)
    {
      node = vlib_get_node (vm, h->node_index);

      if (node != prev_node)
        {
          t =
            (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
          s =
            format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
                    node->name);
        }
      prev_node = node;

      if (node->format_trace)
        s = format (s, "\n %U", node->format_trace, vm, node, h->data);
      else
        s = format (s, "\n %U", node->format_buffer, h->data);

      h = vlib_trace_header_next (h);
    }

  return s;
}

/* Root of all trace cli commands. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
/* *INDENT-ON* */

int
trace_time_cmp (void *a1, void *a2)
{
  vlib_trace_header_t **t1 = a1;
  vlib_trace_header_t **t2 = a2;
  i64 dt = t1[0]->time - t2[0]->time;
  return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
}

/*
 * Return 1 if this packet passes the trace filter, or 0 otherwise
 */
u32
filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
{
  vlib_trace_header_t *e = vec_end (h);

  if (tm->filter_flag == 0)
    return 1;

  /*
   * When capturing a post-mortem dispatch trace,
   * toss all existing traces once per dispatch cycle.
   * So we can trace 4 billion pkts without running out of
   * memory...
   */
  if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
    return 0;

  if (tm->filter_flag == FILTER_FLAG_INCLUDE)
    {
      while (h < e)
        {
          if (h->node_index == tm->filter_node_index)
            return 1;
          h = vlib_trace_header_next (h);
        }
      return 0;
    }
  else				/* FILTER_FLAG_EXCLUDE */
    {
      while (h < e)
        {
          if (h->node_index == tm->filter_node_index)
            return 0;
          h = vlib_trace_header_next (h);
        }
      return 1;
    }

  return 0;
}

/*
 * Remove traces from the trace buffer pool that don't pass the filter
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  /* *INDENT-OFF* */
  pool_foreach (h, tm->trace_buffer_pool)
   {
    accept = filter_accept(tm, h[0]);

    if ((n_accepted == tm->filter_count) || !accept)
      vec_add1 (traces_to_remove, h);
    else
      n_accepted++;
   }
  /* *INDENT-ON* */

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}

static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
                       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
        ;
      else
        return clib_error_create ("expected 'max COUNT', got `%U'",
                                  format_unformat_error, input);
    }


  /* Get active traces from pool. */

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    fmt = "------------------- Start of thread %d %s -------------------\n";
    s = format (s, fmt, index, vlib_worker_threads[index].name);

    tm = &this_vlib_main->trace_main;

    trace_apply_filter(this_vlib_main);

    traces = 0;
    pool_foreach (h, tm->trace_buffer_pool)
     {
      vec_add1 (traces, h[0]);
     }

    if (vec_len (traces) == 0)
      {
        s = format (s, "No packets in trace buffer\n");
        goto done;
      }

    /* Sort them by increasing time. */
    vec_sort_with_function (traces, trace_time_cmp);

    for (i = 0; i < vec_len (traces); i++)
      {
        if (i == max)
          {
            char *warn = "Limiting display to %d packets."
                         " To display more specify max.";
            vlib_cli_output (vm, warn, max);
            s = format (s, warn, max);
            goto done;
          }

        s = format (s, "Packet %d\n%U\n\n", i + 1,
                    format_vlib_trace, vm, traces[i]);
      }

  done:
    vec_free (traces);

    index++;
  }));
  /* *INDENT-ON* */

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
/* *INDENT-ON* */

int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));

int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  return 0;
}

void
vlib_trace_stop_and_clear (void)
{
  vlib_enable_disable_pkt_trace_filter (0);	/* disable tracing */
  clear_trace_buffer ();
}


void
trace_update_capture_options (u32 add, u32 node_index, u32 filter, u8 verbose)
{
  vlib_trace_main_t *tm;
  vlib_trace_node_t *tn;

  if (add == ~0)
    add = 50;

  /* *INDENT-OFF* */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;

      /*
       * Adding 0 makes no real sense, and there was no other way
       * to explicitly zero-out the limits and count, so make
       * an "add 0" request really be "set to 0".
       */
      if (add == 0)
        tn->limit = tn->count = 0;
      else
        tn->limit += add;
    }));

  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->trace_enable = 1;
    }));
  /* *INDENT-ON* */

  vlib_enable_disable_pkt_trace_filter (! !filter);
}

static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
                      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_node_t *node;
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  if (vnet_trace_placeholder == 0)
    vec_validate_aligned (vnet_trace_placeholder, 2048,
                          CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
                    unformat_vlib_node, vm, &node_index, &add))
        ;
      else if (unformat (line_input, "verbose"))
        verbose = 1;
      else if (unformat (line_input, "filter"))
        filter = 1;
      else
        {
          error = clib_error_create ("expected NODE COUNT, got `%U'",
                                     format_unformat_error, line_input);
          goto done;
        }
    }

  node = vlib_get_node (vm, node_index);

  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
                                 "tracing. There may be another way to "
                                 "initiate trace on this node.",
                                 format_vlib_node_name, vm, node_index);
      goto done;
    }

  u32 filter_table = classify_get_trace_chain ();
  if (filter && filter_table == ~0)
    {
      error = clib_error_create ("No packet trace filter configured...");
      goto done;
    }

  trace_update_capture_options (add, node_index, filter, verbose);

done:
  unformat_free (line_input);

  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "trace add <input-graph-node> <add'l-pkts-for-node-> [filter] [verbose]",
  .function = cli_add_trace_buffer,
};
/* *INDENT-ON* */

/*
 * Configure a filter for packet traces.
 *
 * This supplements the packet trace feature so that only packets matching
 * the filter are included in the trace. Currently the only filter is to
 * keep packets that include a certain node in the trace or exclude a certain
 * node in the trace.
 *
 * The count of traced packets in the "trace add" command is still used to
 * create a certain number of traces. The "trace filter" command specifies
 * how many of those packets should be retained in the trace.
 *
 * For example, 1Mpps of traffic is arriving and one of those packets is being
 * dropped. To capture the trace for only that dropped packet, you can do:
 *     trace filter include error-drop 1
 *     trace add dpdk-input 1000000
 *     <wait one second>
 *     show trace
 *
 * Note that the filter could be implemented by capturing all traces and just
 * reducing traces displayed by the "show trace" function. But that would
 * require a lot of memory for storing the traces, making that infeasible.
 *
 * To remove traces from the trace pool that do not include a certain node
 * requires that the trace be "complete" before applying the filter. To
 * accomplish this, the trace pool is filtered upon each iteration of the
 * main vlib loop. Doing so keeps the number of allocated traces down to a
 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes, i.e. you can't
 * save away a buffer temporarily then inject it back into the graph and
 * expect that the trace_index is still valid (such as a traffic manager might
 * do). A new trace buffer should be allocated for those types of packets.
 *
 * The filter can be extended to support multiple nodes and other match
 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
 * a specified node is in the trace or not in the trace.
 */

void
trace_filter_set (u32 node_index, u32 flag, u32 count)
{
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    vlib_trace_main_t *tm;

    tm = &this_vlib_main->trace_main;
    tm->filter_node_index = node_index;
    tm->filter_flag = flag;
    tm->filter_count = count;

    /*
     * Clear the trace limits to stop any in-progress tracing
     * Prevents runaway trace allocations when the filter changes
     * (or is removed)
     */
    vec_free (tm->nodes);
  }));
  /* *INDENT-ON* */
}


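/*
 * Usage sketch (illustrative only, not part of the original file): the CLI
 * example in the comment block above maps onto these two calls, assuming
 * the node indices were already looked up with vlib_get_node_by_name():
 *
 *   trace_filter_set (error_drop_node_index, FILTER_FLAG_INCLUDE, 1);
 *   trace_update_capture_options (1000000, dpdk_input_node_index, 0, 0);
 *
 * i.e. "trace filter include error-drop 1" followed by
 * "trace add dpdk-input 1000000".
 */
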
static clib_error_t *
cli_filter_trace (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 filter_node_index;
  u32 filter_flag;
  u32 filter_count;

  if (unformat (input, "include %U %d",
                unformat_vlib_node, vm, &filter_node_index, &filter_count))
    {
      filter_flag = FILTER_FLAG_INCLUDE;
    }
  else if (unformat (input, "exclude %U %d",
                     unformat_vlib_node, vm, &filter_node_index,
                     &filter_count))
    {
      filter_flag = FILTER_FLAG_EXCLUDE;
    }
  else if (unformat (input, "none"))
    {
      filter_flag = FILTER_FLAG_NONE;
      filter_node_index = 0;
      filter_count = 0;
    }
  else
    return
      clib_error_create
      ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
       format_unformat_error, input);

  trace_filter_set (filter_node_index, filter_flag, filter_count);

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "trace filter none | [include|exclude] NODE COUNT",
  .function = cli_filter_trace,
};
/* *INDENT-ON* */

static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
                        unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_stop_and_clear ();
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};
/* *INDENT-ON* */

/* Placeholder function to get us linked in. */
void
vlib_trace_cli_reference (void)
{
}

int
vnet_is_packet_traced (vlib_buffer_t * b,
                       u32 classify_table_index, int func)
  __attribute__ ((weak));

int
vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
{
  clib_warning ("BUG: STUB called");
  return 1;
}

void *
vlib_add_trace (vlib_main_t * vm,
                vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}


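/*
 * Usage sketch (illustrative only, not part of the original file): the
 * common per-node tracing pattern is to define a small trace record, fill
 * it in while processing a traced buffer, and register a matching format
 * function on the node.  Type and field names below are hypothetical.
 *
 *   typedef struct
 *   {
 *     u32 next_index;
 *     u32 sw_if_index;
 *   } my_trace_t;
 *
 *   if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 *     {
 *       my_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
 *       t->next_index = next0;
 *       t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
 *     }
 */
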
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */