blob: 156378af8e33d225cbf920954b221fa97acaee88 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * trace.c: VLIB trace buffer.
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include <vlib/vlib.h>
41#include <vlib/threads.h>
Jon Loeliger5c1e48c2020-10-15 14:41:36 -040042#include <vnet/classify/vnet_classify.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070043
/* Placeholder trace-data vector; lazily allocated (2048 bytes) the first
 * time "trace add" runs -- see cli_add_trace_buffer below. */
u8 *vnet_trace_placeholder;
Dave Barachf8b85862018-08-17 18:29:07 -040045
/* Helper function for nodes which only trace buffer data.
 *
 * For each buffer index in 'buffers' whose buffer carries
 * VLIB_BUFFER_IS_TRACED, append a trace record holding the first
 * n_buffer_data_bytes_in_trace bytes of the buffer's current data.
 *
 * @param vm       vlib main for the calling thread
 * @param node     node runtime on whose behalf traces are added
 * @param buffers  vector of buffer indices to examine
 * @param n_buffers number of indices in 'buffers'
 * @param next_buffer_stride  currently unused by this implementation
 * @param n_buffer_data_bytes_in_trace  bytes of packet data per record
 */
void
vlib_trace_frame_buffers_only (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       u32 * buffers,
			       uword n_buffers,
			       uword next_buffer_stride,
			       uword n_buffer_data_bytes_in_trace)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = buffers;

  /* Dual-buffer loop: consumes 2 indices per pass but requires >= 4
   * remaining so that from[2] and from[3] are valid prefetch targets. */
  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      u8 *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  /* current_data offsets to the start of the current payload */
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t1, b1->data + b1->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 2;
      n_left -= 2;
    }

  /* Single-buffer cleanup loop for the remaining 0..3 indices. */
  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u8 *t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 1;
      n_left -= 1;
    }
}
112
/* Free up all trace buffer memory.
 *
 * Runs over every vlib main (all threads): first disables tracing and
 * drops the per-node trace config, then frees each trace record still in
 * the pool and finally the pool itself.
 */
void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  /* Pass 1: stop tracing on all threads before touching the pools. */
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    tm->trace_enable = 0;
    vec_free (tm->nodes);
  }));

  /* Pass 2: free every live trace record, then the pool storage. */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
      if (! pool_is_free_index (tm->trace_buffer_pool, i))
        vec_free (tm->trace_buffer_pool[i]);
    pool_free (tm->trace_buffer_pool);
  }));
  /* *INDENT-ON* */
}
140
/* Format one trace record (a vector of vlib_trace_header_t) into a
 * human-readable multi-line string.
 *
 * va_args: vlib_main_t *vm, vlib_trace_header_t *h (vector of headers).
 * A per-node timestamped banner is printed only when the node changes
 * from the previous header; each header's payload is rendered with the
 * node's format_trace callback if present, else its format_buffer.
 */
u8 *
format_vlib_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
  vlib_trace_header_t *e = vec_end (h);
  vlib_node_t *node, *prev_node;
  clib_time_t *ct = &vm->clib_time;
  f64 t;

  prev_node = 0;
  while (h < e)
    {
      node = vlib_get_node (vm, h->node_index);

      if (node != prev_node)
	{
	  /* Time is relative to the start of the main loop, converted
	     from CPU clocks to seconds. */
	  t =
	    (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
	  s =
	    format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
		    node->name);
	}
      prev_node = node;

      if (node->format_trace)
	s = format (s, "\n %U", node->format_trace, vm, node, h->data);
      else
	s = format (s, "\n %U", node->format_buffer, h->data);

      h = vlib_trace_header_next (h);
    }

  return s;
}
176
/* Root of all trace cli commands ("trace ..." subcommands hang off this). */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700184
Jon Loeligerc0b19542020-05-11 08:43:51 -0500185int
186trace_time_cmp (void *a1, void *a2)
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500187{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400188 vlib_trace_header_t **t1 = a1;
189 vlib_trace_header_t **t2 = a2;
Matus Fabiand2dc3df2015-12-14 10:31:33 -0500190 i64 dt = t1[0]->time - t2[0]->time;
191 return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
192}
193
Bud Grise0bcc9d52016-02-02 14:23:29 -0500194/*
195 * Return 1 if this packet passes the trace filter, or 0 otherwise
196 */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400197u32
198filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
Bud Grise0bcc9d52016-02-02 14:23:29 -0500199{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400200 vlib_trace_header_t *e = vec_end (h);
Bud Grise0bcc9d52016-02-02 14:23:29 -0500201
Dave Barach9b8ffd92016-07-08 08:13:45 -0400202 if (tm->filter_flag == 0)
203 return 1;
Bud Grise0bcc9d52016-02-02 14:23:29 -0500204
Dave Barach27d978c2020-11-03 09:59:06 -0500205 /*
206 * When capturing a post-mortem dispatch trace,
207 * toss all existing traces once per dispatch cycle.
208 * So we can trace 4 billion pkts without running out of
209 * memory...
210 */
211 if (tm->filter_flag == FILTER_FLAG_POST_MORTEM)
212 return 0;
213
Bud Grise0bcc9d52016-02-02 14:23:29 -0500214 if (tm->filter_flag == FILTER_FLAG_INCLUDE)
215 {
216 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400217 {
218 if (h->node_index == tm->filter_node_index)
219 return 1;
220 h = vlib_trace_header_next (h);
221 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500222 return 0;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400223 }
224 else /* FILTER_FLAG_EXCLUDE */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500225 {
226 while (h < e)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400227 {
228 if (h->node_index == tm->filter_node_index)
229 return 0;
230 h = vlib_trace_header_next (h);
231 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500232 return 1;
Dave Barach9b8ffd92016-07-08 08:13:45 -0400233 }
Bud Grise0bcc9d52016-02-02 14:23:29 -0500234
235 return 0;
236}
237
/*
 * Remove traces from the trace buffer pool that don't pass the filter,
 * and cap the number of retained (accepted) traces at tm->filter_count.
 * No-op when no filter is configured.
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  /* Collect removal candidates first; mutating the pool while iterating
   * it would invalidate the traversal. */
  /* *INDENT-OFF* */
  pool_foreach (h, tm->trace_buffer_pool)
   {
      accept = filter_accept(tm, h[0]);

      if ((n_accepted == tm->filter_count) || !accept)
          vec_add1 (traces_to_remove, h);
      else
          n_accepted++;
  }
  /* *INDENT-ON* */

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      /* Pointer arithmetic recovers the pool index of the record. */
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}
283
/* CLI handler for "show trace [max COUNT]".
 *
 * For every thread: applies the trace filter, snapshots the live trace
 * records, sorts them by capture time and formats up to 'max' of them.
 * All output is accumulated in one vector and emitted at the end.
 */
static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
	;
      else
	return clib_error_create ("expected 'max COUNT', got `%U'",
				  format_unformat_error, input);
    }


  /* Get active traces from pool. */

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    fmt = "------------------- Start of thread %d %s -------------------\n";
    s = format (s, fmt, index, vlib_worker_threads[index].name);

    tm = &this_vlib_main->trace_main;

    /* Drop records that fail the configured filter before display. */
    trace_apply_filter(this_vlib_main);

    traces = 0;
    pool_foreach (h, tm->trace_buffer_pool)
     {
      vec_add1 (traces, h[0]);
    }

    if (vec_len (traces) == 0)
      {
        s = format (s, "No packets in trace buffer\n");
        goto done;
      }

    /* Sort them by increasing time. */
    vec_sort_with_function (traces, trace_time_cmp);

    for (i = 0; i < vec_len (traces); i++)
      {
        if (i == max)
          {
            vlib_cli_output (vm, "Limiting display to %d packets."
                                 " To display more specify max.", max);
            goto done;
          }

        s = format (s, "Packet %d\n%U\n\n", i + 1,
                         format_vlib_trace, vm, traces[i]);
      }

  done:
    /* Per-thread snapshot vector; the records themselves stay pooled. */
    vec_free (traces);

    index++;
  }));
  /* *INDENT-ON* */

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}
361
/* CLI registration: "show trace" displays collected packet traces. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700369
/* Weak stub, overridden when the real packet-trace-filter implementation
 * is linked in; the stub accepts the request and reports success. */
int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));

int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  (void) enable;		/* nothing to enable or disable here */
  return 0;
}
377
/* Stop tracing: turn off the packet trace filter (if any) and release
 * all trace buffer memory on every thread. */
void
vlib_trace_stop_and_clear (void)
{
  vlib_enable_disable_pkt_trace_filter (0);	/* disable tracing */
  clear_trace_buffer ();
}
384
385
/* Adjust per-node trace capture limits on every thread, then enable
 * tracing everywhere.
 *
 * @param add        packets to add to the node's trace limit; ~0 means
 *                   "use the default of 50", 0 means "reset to zero"
 * @param node_index graph node whose limit is being changed
 * @param filter     nonzero to also enable the packet trace filter
 * @param verbose    stored into each thread's trace main
 */
void
trace_update_capture_options (u32 add, u32 node_index, u32 filter, u8 verbose)
{
  vlib_trace_main_t *tm;
  vlib_trace_node_t *tn;

  if (add == ~0)
    add = 50;

  /* *INDENT-OFF* */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;

      /*
       * Adding 0 makes no real sense, and there was no other way
       * to explicitly zero-out the limits and count, so make
       * an "add 0" request really be "set to 0".
       */
      if (add == 0)
        tn->limit = tn->count = 0;
      else
        tn->limit += add;
    }));

  /* Second pass: flip tracing on only after all limits are in place. */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->trace_enable = 1;
    }));
  /* *INDENT-ON* */

  /* !! normalizes 'filter' to 0/1 for the enable flag. */
  vlib_enable_disable_pkt_trace_filter (! !filter);
}
423
/* CLI handler for "trace add <node> <count> [filter] [verbose]".
 *
 * Parses the target graph node and packet count, validates that the node
 * supports per-node tracing (and that a classifier filter chain exists
 * when "filter" is requested), then applies the capture options.
 */
static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
		      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_node_t *node;
  /* NOTE(review): node_index is only assigned by the "%U %d" branch;
   * a line such as "trace add verbose" appears to reach vlib_get_node
   * with it unset -- confirm upstream behavior. */
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  /* First use of tracing: allocate the shared placeholder data vector. */
  if (vnet_trace_placeholder == 0)
    vec_validate_aligned (vnet_trace_placeholder, 2048,
			  CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
		    unformat_vlib_node, vm, &node_index, &add))
	;
      else if (unformat (line_input, "verbose"))
	verbose = 1;
      else if (unformat (line_input, "filter"))
	filter = 1;
      else
	{
	  error = clib_error_create ("expected NODE COUNT, got `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  node = vlib_get_node (vm, node_index);

  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
				 "tracing. There may be another way to "
				 "initiate trace on this node.",
				 format_vlib_node_name, vm, node_index);
      goto done;
    }

  /* "filter" requires a classifier trace chain to already be configured. */
  u32 filter_table = classify_get_trace_chain ();
  if (filter && filter_table == ~0)
    {
      error = clib_error_create ("No packet trace filter configured...");
      goto done;
    }

  trace_update_capture_options (add, node_index, filter, verbose);

done:
  unformat_free (line_input);

  return error;
}
484
/* CLI registration: "trace add" starts capturing packets at a node. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "trace add <input-graph-node> <add'l-pkts-for-node-> [filter] [verbose]",
  .function = cli_add_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700492
Bud Grise0bcc9d52016-02-02 14:23:29 -0500493/*
494 * Configure a filter for packet traces.
495 *
496 * This supplements the packet trace feature so that only packets matching
497 * the filter are included in the trace. Currently the only filter is to
498 * keep packets that include a certain node in the trace or exclude a certain
499 * node in the trace.
500 *
501 * The count of traced packets in the "trace add" command is still used to
502 * create a certain number of traces. The "trace filter" command specifies
503 * how many of those packets should be retained in the trace.
504 *
505 * For example, 1Mpps of traffic is arriving and one of those packets is being
506 * dropped. To capture the trace for only that dropped packet, you can do:
507 * trace filter include error-drop 1
508 * trace add dpdk-input 1000000
509 * <wait one second>
510 * show trace
511 *
512 * Note that the filter could be implemented by capturing all traces and just
513 * reducing traces displayed by the "show trace" function. But that would
514 * require a lot of memory for storing the traces, making that infeasible.
515 *
516 * To remove traces from the trace pool that do not include a certain node
517 * requires that the trace be "complete" before applying the filter. To
 * accomplish this, the trace pool is filtered upon each iteration of the
519 * main vlib loop. Doing so keeps the number of allocated traces down to a
520 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes. i.e. you can't
522 * save away a buffer temporarily then inject it back into the graph and
523 * expect that the trace_index is still valid (such as a traffic manager might
524 * do). A new trace buffer should be allocated for those types of packets.
525 *
526 * The filter can be extended to support multiple nodes and other match
527 * criteria (e.g. input sw_if_index, mac address) but for now just checks if
528 * a specified node is in the trace or not in the trace.
529 */
Jon Loeligerc0b19542020-05-11 08:43:51 -0500530
/* Install a trace filter (node index, include/exclude/none flag, and
 * retain count) on every thread, clearing per-node trace limits so any
 * in-progress tracing stops. */
void
trace_filter_set (u32 node_index, u32 flag, u32 count)
{
  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    vlib_trace_main_t *tm;

    tm = &this_vlib_main->trace_main;
    tm->filter_node_index = node_index;
    tm->filter_flag = flag;
    tm->filter_count = count;

    /*
     * Clear the trace limits to stop any in-progress tracing
     * Prevents runaway trace allocations when the filter changes
     * (or is removed)
     */
    vec_free (tm->nodes);
  }));
  /* *INDENT-ON* */
}
553
554
/* CLI handler for "trace filter none | [include|exclude] NODE COUNT".
 *
 * Parses the filter mode and target node, then pushes the configuration
 * to every thread via trace_filter_set.  The unformat branches consume
 * input in order, so their sequence must not be rearranged.
 */
static clib_error_t *
cli_filter_trace (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 filter_node_index;
  u32 filter_flag;
  u32 filter_count;

  if (unformat (input, "include %U %d",
		unformat_vlib_node, vm, &filter_node_index, &filter_count))
    {
      filter_flag = FILTER_FLAG_INCLUDE;
    }
  else if (unformat (input, "exclude %U %d",
		     unformat_vlib_node, vm, &filter_node_index,
		     &filter_count))
    {
      filter_flag = FILTER_FLAG_EXCLUDE;
    }
  else if (unformat (input, "none"))
    {
      filter_flag = FILTER_FLAG_NONE;
      filter_node_index = 0;
      filter_count = 0;
    }
  else
    return
      clib_error_create
      ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
       format_unformat_error, input);

  trace_filter_set (filter_node_index, filter_flag, filter_count);

  return 0;
}
590
/* CLI registration: "trace filter" constrains which packets are kept. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "trace filter none | [include|exclude] NODE COUNT",
  .function = cli_filter_trace,
};
/* *INDENT-ON* */
Bud Grise0bcc9d52016-02-02 14:23:29 -0500598
/* CLI handler for "clear trace": stop tracing and free all records. */
static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_stop_and_clear ();
  return 0;
}
606
/* CLI registration: "clear trace" empties the trace buffer. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700614
/* Placeholder function to get us linked in: referencing this symbol from
 * another object forces the linker to include this file's CLI commands. */
void
vlib_trace_cli_reference (void)
{
}
620
/* Weak stub for the classifier-based "is this packet traced" predicate;
 * the real implementation is provided elsewhere (vnet).  If the stub is
 * ever invoked the build is mis-linked, so log loudly and trace anyway. */
int
vnet_is_packet_traced (vlib_buffer_t * b,
		       u32 classify_table_index, int func)
__attribute__ ((weak));

int
vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
{
  clib_warning ("BUG: STUB called");
  return 1;
}
632
/* Out-of-line wrapper around vlib_add_trace_inline so callers outside
 * this translation unit get a real symbol to link against.  Returns a
 * pointer to n_data_bytes of freshly-added trace data for buffer b. */
void *
vlib_add_trace (vlib_main_t * vm,
		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}
639
640
641
Dave Barach9b8ffd92016-07-08 08:13:45 -0400642/*
643 * fd.io coding-style-patch-verification: ON
644 *
645 * Local Variables:
646 * eval: (c-set-style "gnu")
647 * End:
648 */