#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
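
/*
 * Syscall events share one trace_event_class per direction, so the
 * per-event field list for an enter event lives on its
 * syscall_metadata rather than on the class itself.
 */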
static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have their syscall symbols aliased
	 * with a ".SyS" or ".sys" prefix instead of "sys", leading
	 * to an unwanted mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32-bit applications
 * to run on a 64-bit kernel do not map the syscalls for
 * the 32-bit tasks the same way they do for 64-bit tasks.
 *
 * *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
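
/*
 * Map a syscall table entry's address back to its metadata by symbol
 * name. Slots without an implementation all point at sys_ni_syscall,
 * which is filtered out explicitly before scanning the metadata
 * section.
 */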
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}
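
/* Bounds-checked lookup in the table built by init_ftrace_syscalls(). */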
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
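
/*
 * Output a sys_enter event as "name(arg: value, ...)". When the
 * "verbose" trace option is set, each value is also preceded by its
 * C type.
 */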
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
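
/* Output a sys_exit event as "name -> 0x<return value>". */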
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

out:
	return trace_handle_return(s);
}
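
/*
 * __bad_type_size() is never defined anywhere. If the sizeof check in
 * SYSCALL_FIELD() fails, the reference to it survives into the object
 * file and the build breaks at link time, turning a type-size mismatch
 * into a build error rather than a silently corrupt event layout.
 */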
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name) \
	sizeof(type) != sizeof(trace.name) ? \
		__bad_type_size() : \
		#type, #name, offsetof(typeof(trace), name), \
		sizeof(trace.name), is_signed_type(type)
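
/*
 * Build the print_fmt string for an entry event in two passes: called
 * once with len == 0 to measure the needed space (see LEN_OR_ZERO
 * below), then again from set_syscall_print_fmt() to fill the buffer.
 */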
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
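
/*
 * Probe attached to the sys_enter tracepoint, one registration per
 * trace_array. We are inside the tracepoint's rcu_read_lock_sched()
 * section here, hence the rcu_dereference_sched() on the per-syscall
 * file pointer.
 */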
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}
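
/* Probe attached to the sys_exit tracepoint; mirrors the enter path. */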
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}
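
/*
 * The reg/unreg helpers below refcount the tracepoint probes per
 * trace_array: the probe is registered when the first syscall event
 * in an instance is enabled and removed when the last one goes away,
 * all serialized by syscall_trace_lock.
 */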
static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};
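
/*
 * Default syscall-number-to-address mapping; weak so architectures
 * with a non-trivial syscall table layout can provide their own.
 */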
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
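
/*
 * Walk the syscall table once at boot and cache each entry's metadata
 * indexed by syscall number, so syscall_nr_to_meta() is a plain array
 * lookup afterwards.
 */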
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
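
/*
 * perf probe for sys_enter. Unlike the ftrace path, registration is
 * global rather than per trace_array; the enabled_perf_enter_syscalls
 * bitmap decides which syscalls are currently of interest.
 */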
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
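
/*
 * ->reg() callbacks for the two syscall event classes: dispatch the
 * generic trace_reg requests to the ftrace or perf helpers above.
 */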
static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}