/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
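
/*
 * Usage sketch: at runtime the ftrace ring buffer's clock is normally
 * picked through tracefs rather than by calling these functions
 * directly (the mount point below is the usual one, but depends on
 * where tracefs is mounted on the system):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 */
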
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
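
/*
 * A minimal usage sketch, under the assumption that both reads stay on
 * one CPU: trace_clock_local() is only coherent locally, so a typical
 * pattern is taking a delta between two reads in the same context. The
 * helper name below is hypothetical.
 */
static __maybe_unused u64 example_local_delta(void)
{
	u64 t0, t1;

	t0 = trace_clock_local();
	/* ... the work being timed, on the same CPU ... */
	t1 = trace_clock_local();

	/* Only meaningful when both reads ran on the same CPU. */
	return t1 - t0;
}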

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
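
/*
 * Worked example of the bound above: with CONFIG_HZ=250 a jiffy is
 * 1/250 s = 4 ms, so two events logged "simultaneously" on different
 * CPUs may carry timestamps up to roughly 4 ms apart, while events on
 * any single CPU still order correctly.
 */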

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the worst case is
 * an obviously bogus timestamp on a trace event - i.e. not life
 * threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
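
/*
 * Why the 32-bit caveat above holds: on a 32-bit machine the 64-bit
 * read of jiffies_64 is two separate loads. If the tick advances
 * jiffies_64 across a low-word wrap between those loads, the combined
 * value can be off by about 2^32 ticks - the "obviously bogus"
 * timestamp mentioned above.
 */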

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * sched_clock_cpu() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
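
/*
 * Worked example of the clamp above: CPU A stamps an event at
 * sched_clock_cpu() == 1000, so prev_time becomes 1000. CPU B, whose
 * clock lags, then reads 990; the (s64)(now - prev_time) < 0 test
 * fires and B is handed 1001 instead, so values returned by
 * trace_clock_global() never move backwards.
 */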

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
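
/*
 * Usage note: atomic64_add_return() hands every caller a distinct,
 * monotonically increasing value (1, 2, 3, ...), so two events stamped
 * with this clock can always be ordered relative to each other even
 * when they occur within the same nanosecond - at the cost of carrying
 * no real time information.
 */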