/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * trace.c: VLIB trace buffer.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vlib/threads.h>

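/* Scratch area handed out by vlib_add_trace () when a buffer is not being
   traced; allocated on the first "trace add" (see cli_add_trace_buffer
   below). */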
u8 *vnet_trace_placeholder;

/* Helper function for nodes which only trace buffer data. */
void
vlib_trace_frame_buffers_only (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       u32 * buffers,
			       uword n_buffers,
			       uword next_buffer_stride,
			       uword n_buffer_data_bytes_in_trace)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = buffers;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      u8 *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t1 = vlib_add_trace (vm, node, b1, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t1, b1->data + b1->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u8 *t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, n_buffer_data_bytes_in_trace);
	  clib_memcpy_fast (t0, b0->data + b0->current_data,
			    n_buffer_data_bytes_in_trace);
	}
      from += 1;
      n_left -= 1;
    }
}

/* Free up all trace buffer memory. */
always_inline void
clear_trace_buffer (void)
{
  int i;
  vlib_trace_main_t *tm;

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;

    tm->trace_enable = 0;

    for (i = 0; i < vec_len (tm->trace_buffer_pool); i++)
      if (! pool_is_free_index (tm->trace_buffer_pool, i))
        vec_free (tm->trace_buffer_pool[i]);
    pool_free (tm->trace_buffer_pool);
  }));
  /* *INDENT-ON* */
}

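/* Format one packet's trace: for each recorded node, emit a timestamp and
   node-name header (when the node changes), then the node's own
   format_trace (or format_buffer) rendering of the stored data. */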
u8 *
format_vlib_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_trace_header_t *h = va_arg (*va, vlib_trace_header_t *);
  vlib_trace_header_t *e = vec_end (h);
  vlib_node_t *node, *prev_node;
  clib_time_t *ct = &vm->clib_time;
  f64 t;

  prev_node = 0;
  while (h < e)
    {
      node = vlib_get_node (vm, h->node_index);

      if (node != prev_node)
	{
	  t =
	    (h->time - vm->cpu_time_main_loop_start) * ct->seconds_per_clock;
	  s =
	    format (s, "\n%U: %v", format_time_interval, "h:m:s:u", t,
		    node->name);
	}
      prev_node = node;

      if (node->format_trace)
	s = format (s, "\n %U", node->format_trace, vm, node, h->data);
      else
	s = format (s, "\n %U", node->format_buffer, h->data);

      h = vlib_trace_header_next (h);
    }

  return s;
}

/* Root of all trace cli commands. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (trace_cli_command,static) = {
  .path = "trace",
  .short_help = "Packet tracer commands",
};
/* *INDENT-ON* */

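/* Sort helper: order trace records by capture timestamp, earliest first. */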
static int
trace_cmp (void *a1, void *a2)
{
  vlib_trace_header_t **t1 = a1;
  vlib_trace_header_t **t2 = a2;
  i64 dt = t1[0]->time - t2[0]->time;
  return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
}

/*
 * Return 1 if this packet passes the trace filter, or 0 otherwise
 */
u32
filter_accept (vlib_trace_main_t * tm, vlib_trace_header_t * h)
{
  vlib_trace_header_t *e = vec_end (h);

  if (tm->filter_flag == 0)
    return 1;

  if (tm->filter_flag == FILTER_FLAG_INCLUDE)
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 1;
	  h = vlib_trace_header_next (h);
	}
      return 0;
    }
  else				/* FILTER_FLAG_EXCLUDE */
    {
      while (h < e)
	{
	  if (h->node_index == tm->filter_node_index)
	    return 0;
	  h = vlib_trace_header_next (h);
	}
      return 1;
    }

  return 0;
}

/*
 * Remove traces from the trace buffer pool that don't pass the filter
 */
void
trace_apply_filter (vlib_main_t * vm)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;
  vlib_trace_header_t ***traces_to_remove = 0;
  u32 index;
  u32 trace_index;
  u32 n_accepted;

  u32 accept;

  if (tm->filter_flag == FILTER_FLAG_NONE)
    return;

  /*
   * Ideally we would retain the first N traces that pass the filter instead
   * of any N traces.
   */
  n_accepted = 0;
  /* *INDENT-OFF* */
  pool_foreach (h, tm->trace_buffer_pool,
  ({
    accept = filter_accept(tm, h[0]);

    if ((n_accepted == tm->filter_count) || !accept)
      vec_add1 (traces_to_remove, h);
    else
      n_accepted++;
  }));
  /* *INDENT-ON* */

  /* remove all traces that we don't want to keep */
  for (index = 0; index < vec_len (traces_to_remove); index++)
    {
      trace_index = traces_to_remove[index] - tm->trace_buffer_pool;
      _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
      pool_put_index (tm->trace_buffer_pool, trace_index);
    }

  vec_free (traces_to_remove);
}

static clib_error_t *
cli_show_trace_buffer (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm;
  vlib_trace_header_t **h, **traces;
  u32 i, index = 0;
  char *fmt;
  u8 *s = 0;
  u32 max;

  /*
   * By default display only this many traces. To display more, explicitly
   * specify a max. This prevents unexpectedly huge outputs.
   */
  max = 50;
  while (unformat_check_input (input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "max %d", &max))
	;
      else
	return clib_error_create ("expected 'max COUNT', got `%U'",
				  format_unformat_error, input);
    }

  /* Get active traces from pool. */

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    fmt = "------------------- Start of thread %d %s -------------------\n";
    s = format (s, fmt, index, vlib_worker_threads[index].name);

    tm = &this_vlib_main->trace_main;

    trace_apply_filter(this_vlib_main);

    traces = 0;
    pool_foreach (h, tm->trace_buffer_pool,
    ({
      vec_add1 (traces, h[0]);
    }));

    if (vec_len (traces) == 0)
      {
        s = format (s, "No packets in trace buffer\n");
        goto done;
      }

    /* Sort them by increasing time. */
    vec_sort_with_function (traces, trace_cmp);

    for (i = 0; i < vec_len (traces); i++)
      {
        if (i == max)
          {
            vlib_cli_output (vm, "Limiting display to %d packets."
                                 " To display more specify max.", max);
            goto done;
          }

        s = format (s, "Packet %d\n%U\n\n", i + 1,
                    format_vlib_trace, vm, traces[i]);
      }

  done:
    vec_free (traces);

    index++;
  }));
  /* *INDENT-ON* */

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_trace_cli,static) = {
  .path = "show trace",
  .short_help = "Show trace buffer [max COUNT]",
  .function = cli_show_trace_buffer,
};
/* *INDENT-ON* */

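/* Weak stub so vlib links without a packet trace filter implementation;
   a real implementation (e.g. the classifier-based trace filter) is
   expected to override this symbol. */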
int vlib_enable_disable_pkt_trace_filter (int enable) __attribute__ ((weak));
int
vlib_enable_disable_pkt_trace_filter (int enable)
{
  return 0;
}

static clib_error_t *
cli_add_trace_buffer (vlib_main_t * vm,
		      unformat_input_t * input, vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  vlib_trace_main_t *tm;
  vlib_node_t *node;
  vlib_trace_node_t *tn;
  u32 node_index, add;
  u8 verbose = 0;
  int filter = 0;
  clib_error_t *error = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  if (vnet_trace_placeholder == 0)
    vec_validate_aligned (vnet_trace_placeholder, 2048,
			  CLIB_CACHE_LINE_BYTES);

  while (unformat_check_input (line_input) != (uword) UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "%U %d",
		    unformat_vlib_node, vm, &node_index, &add))
	;
      else if (unformat (line_input, "verbose"))
	verbose = 1;
      else if (unformat (line_input, "filter"))
	filter = 1;
      else
	{
	  error = clib_error_create ("expected NODE COUNT, got `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  node = vlib_get_node (vm, node_index);

  if ((node->flags & VLIB_NODE_FLAG_TRACE_SUPPORTED) == 0)
    {
      error = clib_error_create ("node '%U' doesn't support per-node "
				 "tracing. There may be another way to "
				 "initiate trace on this node.",
				 format_vlib_node_name, vm, node_index);
      goto done;
    }

  if (filter)
    {
      if (vlib_enable_disable_pkt_trace_filter (1 /* enable */ ))
	{
	  error = clib_error_create ("No packet trace filter configured...");
	  goto done;
	}
    }

  /* *INDENT-OFF* */
  foreach_vlib_main ((
    {
      tm = &this_vlib_main->trace_main;
      tm->verbose = verbose;
      vec_validate (tm->nodes, node_index);
      tn = tm->nodes + node_index;
      tn->limit += add;
      tm->trace_enable = 1;
    }));
  /* *INDENT-ON* */

done:
  unformat_free (line_input);

  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (add_trace_cli,static) = {
  .path = "trace add",
  .short_help = "Trace given number of packets",
  .function = cli_add_trace_buffer,
};
/* *INDENT-ON* */

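/* Example usage (node name is illustrative): "trace add dpdk-input 100"
   asks each thread to capture up to 100 more packets at dpdk-input;
   "verbose" and "filter" may be appended, as parsed above. */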
/*
 * Configure a filter for packet traces.
 *
 * This supplements the packet trace feature so that only packets matching
 * the filter are included in the trace. Currently the only filter is to
 * keep (or drop) packets whose trace includes a given node.
 *
 * The count of traced packets in the "trace add" command is still used to
 * create a certain number of traces. The "trace filter" command specifies
 * how many of those packets should be retained in the trace.
 *
 * For example, suppose 1 Mpps of traffic is arriving and one of those
 * packets is being dropped. To capture a trace of only that dropped packet:
 *    trace filter include error-drop 1
 *    trace add dpdk-input 1000000
 *    <wait one second>
 *    show trace
 *
 * Note that the filter could be implemented by capturing all traces and
 * simply reducing what "show trace" displays. But that would require a lot
 * of memory to store the traces, making it infeasible.
 *
 * Removing traces from the trace pool that do not include a certain node
 * requires that a trace be "complete" before the filter is applied. To
 * accomplish this, the trace pool is filtered on each iteration of the
 * main vlib loop. Doing so keeps the number of allocated traces down to a
 * reasonably low number. This requires that tracing for a buffer is not
 * performed after the vlib main loop iteration completes, i.e. you can't
 * save a buffer away temporarily, then inject it back into the graph and
 * expect the trace_index to still be valid (as a traffic manager might do).
 * A new trace buffer should be allocated for those types of packets.
 *
 * The filter could be extended to support multiple nodes and other match
 * criteria (e.g. input sw_if_index, MAC address), but for now it just
 * checks whether a specified node is in the trace or not.
 */
static clib_error_t *
cli_filter_trace (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  u32 filter_node_index;
  u32 filter_flag;
  u32 filter_count;

  if (unformat (input, "include %U %d",
		unformat_vlib_node, vm, &filter_node_index, &filter_count))
    {
      filter_flag = FILTER_FLAG_INCLUDE;
    }
  else if (unformat (input, "exclude %U %d",
		     unformat_vlib_node, vm, &filter_node_index,
		     &filter_count))
    {
      filter_flag = FILTER_FLAG_EXCLUDE;
    }
  else if (unformat (input, "none"))
    {
      filter_flag = FILTER_FLAG_NONE;
      filter_node_index = 0;
      filter_count = 0;
    }
  else
    return
      clib_error_create
      ("expected 'include NODE COUNT' or 'exclude NODE COUNT' or 'none', got `%U'",
       format_unformat_error, input);

  /* *INDENT-OFF* */
  foreach_vlib_main (
  ({
    tm = &this_vlib_main->trace_main;
    tm->filter_node_index = filter_node_index;
    tm->filter_flag = filter_flag;
    tm->filter_count = filter_count;

    /*
     * Clear the trace limits to stop any in-progress tracing.
     * Prevents runaway trace allocations when the filter changes
     * (or is removed).
     */
    vec_free (tm->nodes);
  }));
  /* *INDENT-ON* */

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (filter_trace_cli,static) = {
  .path = "trace filter",
  .short_help = "filter trace output - include NODE COUNT | exclude NODE COUNT | none",
  .function = cli_filter_trace,
};
/* *INDENT-ON* */

static clib_error_t *
cli_clear_trace_buffer (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_enable_disable_pkt_trace_filter (0 /* enable */ );
  clear_trace_buffer ();
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (clear_trace_cli,static) = {
  .path = "clear trace",
  .short_help = "Clear trace buffer and free memory",
  .function = cli_clear_trace_buffer,
};
/* *INDENT-ON* */

/* Placeholder function to get us linked in. */
void
vlib_trace_cli_reference (void)
{
}

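/* Weak stub for the classifier-based "is this packet traced?" check; a
   real implementation is expected to override it, so reaching this stub
   at runtime is a bug (hence the warning below). */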
int
vnet_is_packet_traced (vlib_buffer_t * b,
		       u32 classify_table_index, int func)
__attribute__ ((weak));

int
vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
{
  clib_warning ("BUG: STUB called");
  return 1;
}

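/* Out-of-line wrapper: forwards to vlib_add_trace_inline so a non-inlined,
   linkable vlib_add_trace symbol exists. */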
void *
vlib_add_trace (vlib_main_t * vm,
		vlib_node_runtime_t * r, vlib_buffer_t * b, u32 n_data_bytes)
{
  return vlib_add_trace_inline (vm, r, b, n_data_bytes);
}


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */