/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

int tsc_disable;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
				"cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;

	return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use a khz divisor instead of mhz to keep better precision,
 *	since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
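
/*
 * Illustrative example (hypothetical numbers): on a 2 GHz CPU,
 * cpu_khz = 2000000, so
 *	cyc2ns_scale = (10^6 << 10) / 2000000 = 512
 * and cycles_2_ns() (see <asm/timer.h>) computes
 *	ns = (cycles * 512) >> 10 = cycles / 2
 * i.e. 0.5 ns per TSC cycle, as expected for a 2 GHz clock.
 */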

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits..
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64,CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
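
/*
 * Illustrative note, assuming mach_countup() spins for a PIT window of
 * CALIBRATE_TIME_MSEC milliseconds: delta64 is then the number of TSC
 * cycles in that window, so the division above yields kHz directly.
 * E.g. 60,000,000 cycles over a hypothetical 30 ms window gives
 * 60000000 / 30 = 2,000,000 kHz, i.e. a 2 GHz CPU.
 */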

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old){
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (cpu_khz) {

			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}
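
/*
 * Illustrative note: cpufreq_scale(old, ref, new) rescales a
 * frequency-proportional value, roughly old * new / ref.  E.g. if
 * loops_per_jiffy_ref was sampled at ref_freq = 1000000 kHz and the CPU
 * now runs at freq->new = 2000000 kHz, loops_per_jiffy doubles.
 */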

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};
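
/*
 * Illustrative note: the clocksource core converts TSC deltas to
 * nanoseconds as (cycles * mult) >> shift.  mult is filled in by
 * clocksource_khz2mult(current_tsc_khz, 22) in tsc_init() below,
 * chosen so that cycles * mult / 2^22 ~= cycles * 10^6 / tsc_khz.
 */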

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
		       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	 {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long val;

	rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
	if ((val & RTSC_SUSP))
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		goto out_no_tsc;

	printk("Detected %lu.%03lu MHz processor.\n",
				(unsigned long)cpu_khz / 1000,
				(unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	} else
		tsc_enabled = 1;

	clocksource_register(&clocksource_tsc);

	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support; this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}