/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local:  CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}
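
/*
 * Illustrative sketch, not part of the original file: trace_clock_local()
 * values are only comparable on one CPU, so an interval measurement should
 * pin both reads to the same CPU, e.g. by disabling preemption. The
 * example_local_interval() name is hypothetical.
 */
static inline u64 example_local_interval(void (*fn)(void))
{
	u64 t0, t1;

	preempt_disable();
	t0 = trace_clock_local();
	fn();			/* the work being timed */
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;
}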

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}
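
/*
 * Illustrative sketch, not part of the original file: trace_clock()
 * timestamps from different CPUs can disagree by up to ~1 jiffy, so only
 * differences beyond that bound establish ordering across CPUs. Assumes
 * TICK_NSEC from <linux/jiffies.h>; the example_clearly_after() name is
 * hypothetical.
 */
static inline bool example_clearly_after(u64 ts_a, u64 ts_b)
{
	return (s64)(ts_a - ts_b) > (s64)TICK_NSEC;
}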

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */
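
/* last timestamp handed out; updates serialized by trace_clock_lock */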
static u64 prev_trace_clock_time;

static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_trace_clock_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - prev_trace_clock_time) < 0)
		now = prev_trace_clock_time + 1;

	prev_trace_clock_time = now;

	__raw_spin_unlock(&trace_clock_lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
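
/*
 * Illustrative sketch, not part of the original file: because
 * trace_clock_global() never goes backwards, a timestamp taken on one CPU
 * can be subtracted from one taken later on another CPU, e.g. to measure
 * a cross-CPU wakeup latency. The example_cross_cpu_latency() name is
 * hypothetical.
 */
static inline u64 example_cross_cpu_latency(u64 ts_start)
{
	/* may run on a different CPU than the one that stamped ts_start */
	return trace_clock_global() - ts_start;
}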