/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and apply the unstable clock deltas on top. The
 * deltas are filtered, keeping the clock monotonic and within an expected
 * window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock, sched_clock_cpu(), is monotonic per cpu and should be somewhat
 * consistent between cpus (never more than 2 jiffies apart).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosecond units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
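/*
 * Note: jiffies is initialized to INITIAL_JIFFIES, a value chosen so the
 * counter wraps soon after boot to flush out wrap bugs early; subtracting
 * it makes this fallback clock start near zero. Its resolution is a full
 * tick, NSEC_PER_SEC / HZ (e.g. 4 ms at HZ=250), so architectures wanting
 * finer granularity must provide their own sched_clock().
 */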

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	u64			tick_raw;	/* sched_clock() at the last tick */
	u64			tick_gtod;	/* ktime_get(), in ns, at the last tick */
	u64			clock;		/* latest filtered clock value */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
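/*
 * Note: tick_raw starts at 0 while tick_gtod/clock start at the current
 * ktime, so until the first tick the delta computed in
 * __update_sched_clock() covers the whole raw-clock history; the clamp
 * against tick_gtod + TICK_NSEC below keeps the result within one tick
 * of the gtod base regardless.
 */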

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
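/*
 * Worked example: with x = 10 and y = ULLONG_MAX - 10 (i.e. y is just
 * about to wrap), x - y == 21 as u64, so (s64)(x - y) > 0 and wrap_max()
 * picks x: the numerically smaller value is treated as the later one,
 * which is what we want across a wrap. The signed comparison of the
 * difference works as long as the two values are within 2^63 ns of each
 * other.
 */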

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	s64 delta = now - scd->tick_raw;
	u64 clock, min_clock, max_clock;

	if (unlikely(delta < 0))
		delta = 0;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      max(scd->clock, scd->tick_gtod + TICK_NSEC));
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);
	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	scd->clock = clock;

	return scd->clock;
}
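/*
 * Worked example of the clamp: say scd->tick_gtod = 1000000 (1 ms),
 * scd->clock = 1000500 and TICK_NSEC = 1000000. A crazy TSC delta of
 * 50000000 would give clock = 51000000, but max_clock = 2000000 caps it
 * at one tick past the gtod base; a delta of 0 would give
 * clock = 1000000, but min_clock = 1000500 keeps the clock from going
 * backwards.
 */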

/*
 * Lock two cpus' clock data in pointer order, so that two cpus
 * updating each other's clock concurrently take the locks in the
 * same order and cannot ABBA deadlock.
 */
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	if (sched_clock_stable)
		return sched_clock();

	scd = cpu_sdc(cpu);

	/*
	 * Normally this is not called in NMI context - but if it is,
	 * trying to do any locking here is totally lethal.
	 */
	if (unlikely(in_nmi()))
		return scd->clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}
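/*
 * Note the asymmetry in the remote path above: @now was sampled from the
 * local cpu's sched_clock(), so we only __update_sched_clock() our own
 * scd and merely read the remote scd->clock, rather than feeding a
 * foreign raw value into the remote cpu's filter.
 */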

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__update_sched_clock(scd, now);
	__raw_spin_unlock(&scd->lock);
}
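/*
 * Because each tick re-anchors the [tick_gtod, tick_gtod + TICK_NSEC]
 * window, a cpu's clock normally runs at most one tick ahead of its gtod
 * base, and tick times on different cpus differ by less than a tick;
 * this is what bounds the inter-cpu difference to roughly 2 jiffies, as
 * claimed at the top of this file.
 */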

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled);
 * @delta_ns itself is not consumed here: the gtod resync done by
 * sched_clock_tick() already accounts for the time slept.
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

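/*
 * Irq-safe wrapper around sched_clock_cpu(): usable from contexts where
 * interrupts may still be enabled.
 */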
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);