/**
 * @file oprof.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"

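/*
 * Hook table for the active profiling driver: filled in by
 * oprofile_arch_init(), or by the timer fallbacks registered in
 * oprofile_init() below when the architecture driver is unavailable.
 */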
struct oprofile_operations oprofile_ops;

unsigned long oprofile_started;
unsigned long oprofile_backtrace_depth;
static unsigned long is_setup;
static DEFINE_MUTEX(start_mutex);

/* timer
   0 - use performance monitoring hardware if available
   1 - use the timer interrupt mechanism regardless
 */
static int timer = 0;

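/*
 * Allocate the per-CPU and event buffers, run the driver's setup hook,
 * and start task/mmap synchronisation so that samples can later be
 * attributed to the right tasks.
 */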
int oprofile_setup(void)
{
	int err;

	mutex_lock(&start_mutex);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;

	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;

	/* Note: even though this starts part of the profiling
	 * overhead early, it's necessary to prevent us missing
	 * task deaths and eventually oopsing when trying to
	 * process the event buffer.
	 */
	if (oprofile_ops.sync_start) {
		int sync_ret = oprofile_ops.sync_start();
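		/*
		 * sync_start() return convention, as handled by the
		 * switch below:
		 *   0 - the driver started its own sync; skip the generic one
		 *   1 - fall through to the generic sync_start()
		 *   anything else - error
		 */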
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		case -1:
		default:
			/* err still holds 0 from the calls above; set it
			 * so a failed sync_start() isn't reported as
			 * success. */
			err = -EINVAL;
			goto out3;
		}
	}
do_generic:
	if ((err = sync_start()))
		goto out3;

post_sync:
	is_setup = 1;
	mutex_unlock(&start_mutex);
	return 0;

out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	mutex_unlock(&start_mutex);
	return err;
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

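/*
 * Event multiplexing: a delayed work item periodically asks the
 * architecture driver to rotate to its next set of hardware events,
 * so that more event types can be profiled than the hardware has
 * counters.
 */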
static void switch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(switch_work, switch_worker);

static void start_switch_worker(void)
{
	if (oprofile_ops.switch_events)
		schedule_delayed_work(&switch_work, oprofile_time_slice);
}

static void stop_switch_worker(void)
{
	cancel_delayed_work_sync(&switch_work);
}

static void switch_worker(struct work_struct *work)
{
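	/* Rotate to the next event set; on error, stop rescheduling. */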
	if (oprofile_ops.switch_events())
		return;

	atomic_inc(&oprofile_stats.multiplex_counter);
	start_switch_worker();
}

/* User input is in ms; convert it to jiffies */
int oprofile_set_timeout(unsigned long val_msec)
{
	int err = 0;
	unsigned long time_slice;

	mutex_lock(&start_mutex);

	if (oprofile_started) {
		err = -EBUSY;
		goto out;
	}

	if (!oprofile_ops.switch_events) {
		err = -EINVAL;
		goto out;
	}

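	/* msecs_to_jiffies() saturates to MAX_JIFFY_OFFSET on overflow. */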
	time_slice = msecs_to_jiffies(val_msec);
	if (time_slice == MAX_JIFFY_OFFSET) {
		err = -EINVAL;
		goto out;
	}

	oprofile_time_slice = time_slice;

out:
	mutex_unlock(&start_mutex);
	return err;
}

#else

static inline void start_switch_worker(void) { }
static inline void stop_switch_worker(void) { }

#endif

/* Actually start profiling (echo 1 > /dev/oprofile/enable) */
int oprofile_start(void)
{
	int err = -EINVAL;

	mutex_lock(&start_mutex);

	if (!is_setup)
		goto out;

	err = 0;

	if (oprofile_started)
		goto out;

	oprofile_reset_stats();

	if ((err = oprofile_ops.start()))
		goto out;

	start_switch_worker();

	oprofile_started = 1;
out:
	mutex_unlock(&start_mutex);
	return err;
}

/* echo 0 > /dev/oprofile/enable */
void oprofile_stop(void)
{
	mutex_lock(&start_mutex);
	if (!oprofile_started)
		goto out;
	oprofile_ops.stop();
	oprofile_started = 0;

	stop_switch_worker();

	/* wake up the daemon to read what remains */
	wake_up_buffer_waiter();
out:
	mutex_unlock(&start_mutex);
}

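/* Tear everything down again, in roughly the reverse order of
 * oprofile_setup(). */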
void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);
	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();
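		/*
		 * sync_stop() return convention, as handled by the
		 * switch below:
		 *   0 - the driver stopped its own sync
		 *   1 - fall through to the generic sync_stop()
		 */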
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		default:
			goto post_sync;
		}
	}
do_generic:
	sync_stop();
post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}

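/* Set a profiling parameter (e.g. oprofile_backtrace_depth); refused
 * with -EBUSY while profiling is running. */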
int oprofile_set_ulong(unsigned long *addr, unsigned long val)
{
	int err = -EBUSY;

	mutex_lock(&start_mutex);
	if (!oprofile_started) {
		*addr = val;
		err = 0;
	}
	mutex_unlock(&start_mutex);

	return err;
}

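/* Nonzero when the generic timer/NMI fallback is in use instead of the
 * architecture driver; oprofile_exit() skips oprofile_arch_exit() then. */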
static int timer_mode;

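/*
 * Try the architecture-specific driver first; if it fails, or if the
 * "timer" module parameter is set, fall back to NMI-timer mode and
 * finally to plain timer mode.
 */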
static int __init oprofile_init(void)
{
	int err;

	/* always init architecture to set up backtrace support */
	timer_mode = 0;
	err = oprofile_arch_init(&oprofile_ops);
	if (!err) {
		if (!timer && !oprofilefs_register())
			return 0;
		oprofile_arch_exit();
	}

	/* set up timer mode: */
	timer_mode = 1;
	/* no nmi timer mode if oprofile.timer is set */
	if (timer || op_nmi_timer_init(&oprofile_ops)) {
		err = oprofile_timer_init(&oprofile_ops);
		if (err)
			return err;
	}

	return oprofilefs_register();
}

static void __exit oprofile_exit(void)
{
	oprofilefs_unregister();
	if (!timer_mode)
		oprofile_arch_exit();
}

module_init(oprofile_init);
module_exit(oprofile_exit);

module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");