/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};
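
/*
 * Illustrative note (a worked example of the tsk_pinned encoding above):
 * if, on a given cpu, three tasks each own one breakpoint of some type
 * and one task owns two, then tsk_pinned[0] == 3 and tsk_pinned[1] == 1,
 * and max_task_bp_pinned() below reports 2.
 */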

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
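
/*
 * For instance (illustrative): if the task already owned two breakpoints
 * of this type on this cpu, it was accounted in tsk_pinned[1]; adding one
 * of weight 1 decrements tsk_pinned[1] and increments tsk_pinned[2].
 * On removal the weight is negative, so the move reverses.
 */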

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
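
/*
 * A worked example (illustrative, assuming an arch with 4 slots, such as
 * x86 where HBP_NUM == 4): if a cpu already has one pinned cpu-wide
 * breakpoint and some task holds two pinned breakpoints there, one more
 * task-pinned breakpoint of weight 1 for that task still fits
 * (1 + 2 + 1 <= 4), while a further one would make the pinned count
 * exceed the slots and be refused with -ENOSPC below.
 */
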
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the overflow_handler callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
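
/*
 * A minimal usage sketch (illustrative only; my_handler, addr and tsk are
 * made-up names): set a 4-byte write watchpoint on an address in a task.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */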

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
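
/*
 * Illustrative sketch (new_addr is a made-up name): a caller such as
 * ptrace can move an existing breakpoint by copying its current
 * attributes, changing the address and handing them back:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */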

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the overflow_handler callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
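
/*
 * Illustrative sketch (my_handler is a made-up name), modeled on
 * samples/hw_breakpoint/data_breakpoint.c: place the same breakpoint on
 * every cpu and unwrap the error, if any, from the percpu pointer:
 *
 *	struct perf_event * __percpu *wide_bp;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */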

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}