/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;

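/*
 * "nodelayacct" boot parameter: disable per-task delay accounting at
 * boot by clearing delayacct_on before any tasks are accounted.
 */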
static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);

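/*
 * delayacct_init - set up delay accounting at boot
 *
 * Creates the slab cache used for per-task struct task_delay_info
 * allocations (panicking on failure via SLAB_PANIC) and initializes
 * delay accounting for the init task.
 */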
void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC);
	delayacct_tsk_init(&init_task);
}

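/*
 * __delayacct_tsk_init - allocate delay accounting state for a task
 * @tsk: task being set up
 *
 * Allocates a zeroed struct task_delay_info and initializes its lock.
 * On allocation failure tsk->delays is left NULL; the inline
 * delayacct_* wrappers are assumed to check for that before calling
 * into this file.
 */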
void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(u64 *start, u64 *total, u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		spin_lock_irqsave(&current->delays->lock, flags);
		*total += ns;
		(*count)++;
		spin_unlock_irqrestore(&current->delays->lock, flags);
	}
}

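/* Record the timestamp at which the current task starts blocking on block I/O */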
void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

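/*
 * Charge the time since blkio_start either to swapin_delay (if the
 * block I/O was a swapin, per DELAYACCT_PF_SWAPIN) or to blkio_delay.
 * Callers are expected to bracket the blocking region with the
 * delayacct_blkio_start()/delayacct_blkio_end() wrappers, which are
 * assumed to check that delay accounting is enabled before ending up
 * here.
 */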
void __delayacct_blkio_end(void)
{
	if (current->delays->flags & DELAYACCT_PF_SWAPIN)
		/* Swapin block I/O */
		delayacct_end(&current->delays->blkio_start,
			&current->delays->swapin_delay,
			&current->delays->swapin_count);
	else	/* Other block I/O */
		delayacct_end(&current->delays->blkio_start,
			&current->delays->blkio_delay,
			&current->delays->blkio_count);
}

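/*
 * __delayacct_add_tsk - accumulate a task's delay statistics into taskstats
 * @d: taskstats structure being filled in
 * @tsk: task whose CPU, block I/O, swapin and freepages delays are added
 *
 * Each XXX_total field saturates to zero on overflow, so a zero total
 * together with a non-zero count signals that the statistic wrapped.
 * The scheduler fields are read without locking; delays->lock protects
 * only the counters maintained by this file.
 */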
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	cputime_t utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += cputime_to_nsecs(utime + stime);
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += cputime_to_nsecs(utimescaled + stimescaled);
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

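/*
 * Return the task's combined block I/O and swapin delay, converted from
 * nanoseconds to clock ticks.
 */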
__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

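/* Record the timestamp at which the current task starts waiting on page reclaim ("freepages" delay) */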
void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

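/* Charge the time since freepages_start to the task's freepages delay */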
void __delayacct_freepages_end(void)
{
	delayacct_end(&current->delays->freepages_start,
			&current->delays->freepages_delay,
			&current->delays->freepages_count);
}