blob: 9313d41eb7d177c92274d831eca5a0ccdb9bf014 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 * trace_funcs.h: VLIB trace buffer.
17 *
18 * Copyright (c) 2008 Eliot Dresselhaus
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#ifndef included_vlib_trace_funcs_h
41#define included_vlib_trace_funcs_h
42
Dave Barach11fb09e2020-08-06 12:10:09 -040043extern u8 *vnet_trace_placeholder;
Dave Barachf8b85862018-08-17 18:29:07 -040044
Ed Warnickecb9cada2015-12-08 15:45:58 -070045always_inline void
46vlib_validate_trace (vlib_trace_main_t * tm, vlib_buffer_t * b)
47{
Dave Baracha638c182019-06-21 18:24:07 -040048 ASSERT (!pool_is_free_index (tm->trace_buffer_pool,
49 vlib_buffer_get_trace_index (b)));
Ed Warnickecb9cada2015-12-08 15:45:58 -070050}
51
Benoît Ganne9a3973e2020-10-02 19:36:57 +020052int vlib_add_handoff_trace (vlib_main_t * vm, vlib_buffer_t * b);
Dave Baracha638c182019-06-21 18:24:07 -040053
/*
 * Append a trace record of n_data_bytes to the trace entry associated
 * with buffer b, returning a pointer to the record's data area for the
 * caller to fill in.  If the buffer is not traced, tracing is disabled,
 * or the trace entry is unusable, returns vnet_trace_placeholder instead,
 * which callers may safely write n_data_bytes into.
 */
always_inline void *
vlib_add_trace_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * r, vlib_buffer_t * b,
		       u32 n_data_bytes)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t *h;
  u32 n_data_words, trace_index;

  /* Placeholder scratch area must have been set up at init time. */
  ASSERT (vnet_trace_placeholder);

  /* Untraced buffer: give the caller scratch space to scribble on. */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_IS_TRACED) == 0))
    return vnet_trace_placeholder;

  /* An externally-registered callback takes over trace allocation. */
  if (PREDICT_FALSE (tm->add_trace_callback != 0))
    {
      return tm->add_trace_callback ((struct vlib_main_t *) vm,
				     (struct vlib_node_runtime_t *) r,
				     (struct vlib_buffer_t *) b,
				     n_data_bytes);
    }
  else if (PREDICT_FALSE (tm->trace_enable == 0))
    {
      /* Tracing globally off: hand back the placeholder, but make sure
       * it is big enough for the caller's write. */
      ASSERT (vec_len (vnet_trace_placeholder) >= n_data_bytes + sizeof (*h));
      return vnet_trace_placeholder;
    }

  /* Are we trying to trace a handoff case?  The buffer was marked traced
   * on another thread; allocate a local trace entry for it first. */
  if (PREDICT_FALSE (vlib_buffer_get_trace_thread (b) != vm->thread_index))
    if (PREDICT_FALSE (!vlib_add_handoff_trace (vm, b)))
      return vnet_trace_placeholder;

  /*
   * there is a small chance of a race condition with 'clear trace' here: if a
   * buffer was set to be traced before the 'clear trace' and is still going
   * through the graph after the 'clear trace', its trace_index is staled as
   * the pool was destroyed.
   * The pool may have been re-allocated because of a new traced buffer, and
   * the trace_index might be valid by pure (bad) luck. In that case the trace
   * will be a mix of both buffer traces, but this should be acceptable.
   */
  trace_index = vlib_buffer_get_trace_index (b);
  if (PREDICT_FALSE (pool_is_free_index (tm->trace_buffer_pool, trace_index)))
    return vnet_trace_placeholder;

  /* Grow the per-buffer trace vector by one header plus the (rounded-up)
   * data payload, measured in vlib_trace_header_t units. */
  n_data_bytes = round_pow2 (n_data_bytes, sizeof (h[0]));
  n_data_words = n_data_bytes / sizeof (h[0]);
  vec_add2_aligned (tm->trace_buffer_pool[trace_index], h, 1 + n_data_words,
		    sizeof (h[0]));

  /* Stamp the record header; caller fills h->data. */
  h->time = vm->cpu_time_last_node_dispatch;
  h->n_data = n_data_words;
  h->node_index = r->node_index;

  return h->data;
}
Dave Barach9b8ffd92016-07-08 08:13:45 -0400110
Dave Barach0a67b482020-06-08 11:17:19 -0400111/* Non-inline (typical use-case) version of the above */
112void *vlib_add_trace (vlib_main_t * vm,
113 vlib_node_runtime_t * r, vlib_buffer_t * b,
114 u32 n_data_bytes);
115
Ed Warnickecb9cada2015-12-08 15:45:58 -0700116always_inline vlib_trace_header_t *
117vlib_trace_header_next (vlib_trace_header_t * h)
Dave Barach9b8ffd92016-07-08 08:13:45 -0400118{
119 return h + 1 + h->n_data;
120}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700121
/*
 * Release the trace entry owned by buffer b: truncate its trace record
 * vector (keeping the allocation for reuse) and return the entry to the
 * trace buffer pool.  Asserts that b's trace handle is live.
 */
always_inline void
vlib_free_trace (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  u32 trace_index = vlib_buffer_get_trace_index (b);
  vlib_validate_trace (tm, b);
  /* Reset length to 0 without freeing the vector's storage. */
  _vec_len (tm->trace_buffer_pool[trace_index]) = 0;
  pool_put_index (tm->trace_buffer_pool, trace_index);
}
131
132always_inline void
133vlib_trace_next_frame (vlib_main_t * vm,
Dave Barach9b8ffd92016-07-08 08:13:45 -0400134 vlib_node_runtime_t * r, u32 next_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700135{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400136 vlib_next_frame_t *nf;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700137 nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
138 nf->flags |= VLIB_FRAME_TRACE;
139}
140
Bud Grise0bcc9d52016-02-02 14:23:29 -0500141void trace_apply_filter (vlib_main_t * vm);
Dave Barach87d24db2019-12-04 17:19:12 -0500142int vnet_is_packet_traced (vlib_buffer_t * b,
143 u32 classify_table_index, int func);
144
Bud Grise0bcc9d52016-02-02 14:23:29 -0500145
Benoît Ganne9a3973e2020-10-02 19:36:57 +0200146/*
147 * Mark buffer as traced and allocate trace buffer.
148 * return 1 if the buffer is successfully traced, 0 if not
149 * A buffer might not be traced if tracing is off or if the packet did not
150 * match the filter.
151 */
always_inline __clib_warn_unused_result int
vlib_trace_buffer (vlib_main_t * vm,
		   vlib_node_runtime_t * r,
		   u32 next_index, vlib_buffer_t * b, int follow_chain)
{
  vlib_trace_main_t *tm = &vm->trace_main;
  vlib_trace_header_t **h;

  /* Tracing globally disabled: nothing to do. */
  if (PREDICT_FALSE (tm->trace_enable == 0))
    return 0;

  /* Classifier filter in use? */
  if (PREDICT_FALSE (vlib_global_main.trace_filter.trace_filter_enable))
    {
      /* See if we're supposed to trace this packet... */
      if (vnet_is_packet_traced (
	    b, vlib_global_main.trace_filter.classify_table_index,
	    0 /* full classify */) != 1)
	return 0;
    }

  /*
   * Apply filter to existing traces to keep number of allocated traces low.
   * Performed each time around the main loop.
   */
  if (tm->last_main_loop_count != vm->main_loop_count)
    {
      tm->last_main_loop_count = vm->main_loop_count;
      trace_apply_filter (vm);

      /* Optional hook for external consumers of the trace pool. */
      if (tm->trace_buffer_callback)
	(tm->trace_buffer_callback) ((struct vlib_main_t *) vm,
				     (struct vlib_trace_main_t *) tm);
    }

  /* Mark the outgoing frame as (potentially) carrying traced buffers. */
  vlib_trace_next_frame (vm, r, next_index);

  /* Allocate one trace entry; its pool index becomes the trace handle. */
  pool_get (tm->trace_buffer_pool, h);

  /* Mark b — and, if follow_chain, every buffer chained after it — as
   * traced, all sharing the same trace entry. */
  do
    {
      b->flags |= VLIB_BUFFER_IS_TRACED;
      b->trace_handle = vlib_buffer_make_trace_handle
	(vm->thread_index, h - tm->trace_buffer_pool);
    }
  while (follow_chain && (b = vlib_get_next_buffer (vm, b)));

  return 1;
}
201
202always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400203vlib_buffer_copy_trace_flag (vlib_main_t * vm, vlib_buffer_t * b,
204 u32 bi_target)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700205{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400206 vlib_buffer_t *b_target = vlib_get_buffer (vm, bi_target);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700207 b_target->flags |= b->flags & VLIB_BUFFER_IS_TRACED;
Dave Baracha638c182019-06-21 18:24:07 -0400208 b_target->trace_handle = b->trace_handle;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700209}
210
211always_inline u32
212vlib_get_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt)
213{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400214 vlib_trace_main_t *tm = &vm->trace_main;
215 vlib_trace_node_t *tn;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700216
217 if (rt->node_index >= vec_len (tm->nodes))
218 return 0;
219 tn = tm->nodes + rt->node_index;
Dave Barach27d978c2020-11-03 09:59:06 -0500220 ASSERT (tn->count <= tn->limit);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700221
Dave Barach27d978c2020-11-03 09:59:06 -0500222 return tn->limit - tn->count;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700223}
224
225always_inline void
Dave Barach9b8ffd92016-07-08 08:13:45 -0400226vlib_set_trace_count (vlib_main_t * vm, vlib_node_runtime_t * rt, u32 count)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700227{
Dave Barach9b8ffd92016-07-08 08:13:45 -0400228 vlib_trace_main_t *tm = &vm->trace_main;
229 vlib_trace_node_t *tn = vec_elt_at_index (tm->nodes, rt->node_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700230
231 ASSERT (count <= tn->limit);
232 tn->count = tn->limit - count;
233}
234
235/* Helper function for nodes which only trace buffer data. */
236void
237vlib_trace_frame_buffers_only (vlib_main_t * vm,
238 vlib_node_runtime_t * node,
239 u32 * buffers,
240 uword n_buffers,
241 uword next_buffer_stride,
242 uword n_buffer_data_bytes_in_trace);
243
244#endif /* included_vlib_trace_funcs_h */
Dave Barach9b8ffd92016-07-08 08:13:45 -0400245
246/*
247 * fd.io coding-style-patch-verification: ON
248 *
249 * Local Variables:
250 * eval: (c-set-style "gnu")
251 * End:
252 */