#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/*
 * native_sched_clock() is called before tsc_init(), so we must start
 * with the TSC soft disabled to prevent erroneous rdtsc usage on
 * !cpu_has_tsc processors.
 */
static int __read_mostly tsc_disabled = -1;

int tsc_clocksource_reliable;

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

DEFINE_PER_CPU(unsigned long, cyc2ns);
DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        unsigned long long ns = this_cpu_read(cyc2ns_offset);
        ns += mul_u64_u32_shr(cyc, this_cpu_read(cyc2ns), CYC2NS_SCALE_FACTOR);
        return ns;
}

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now, *offset;
        unsigned long flags, *scale;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        scale = &per_cpu(cyc2ns, cpu);
        offset = &per_cpu(cyc2ns_offset, cpu);

        rdtscll(tsc_now);
        ns_now = cycles_2_ns(tsc_now);

        if (cpu_khz) {
                *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
                          cpu_khz / 2) / cpu_khz;
                *offset = ns_now - mult_frac(tsc_now, *scale,
                                             (1UL << CYC2NS_SCALE_FACTOR));
        }

        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}

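/*
 * Worked example (editorial note, not part of the original file): for a
 * hypothetical cpu_khz of 2000000 (a 2 GHz part), set_cyc2ns_scale()
 * computes the rounded scale
 *
 *	scale = ((10^6 << 10) + 2000000/2) / 2000000 = 512
 *
 * and cycles_2_ns() then yields (cycles * 512) >> 10 == cycles / 2,
 * i.e. 0.5 ns per cycle, exactly what a 2 GHz clock should give.
 */
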
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
        u64 this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(tsc_disabled)) {
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
        }

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}

/*
 * We need to define a real function for sched_clock(), to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

unsigned long long native_read_tsc(void)
{
        return __native_read_tsc();
}
EXPORT_SYMBOL(native_read_tsc);

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
        return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
        pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
        tsc_disabled = 1;
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
        if (!strcmp(str, "reliable"))
                tsc_clocksource_reliable = 1;
        if (!strncmp(str, "noirqtime", 9))
                no_sched_irq_time = 1;
        return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
        u64 t1, t2;
        int i;

        for (i = 0; i < MAX_RETRIES; i++) {
                t1 = get_cycles();
                if (hpet)
                        *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
                else
                        *p = acpi_pm_read_early();
                t2 = get_cycles();
                if ((t2 - t1) < SMI_TRESHOLD)
                        return t2;
        }
        return ULLONG_MAX;
}

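/*
 * Usage sketch (editorial note, not part of the original file): callers
 * bracket one slow reference read between two TSC reads. If an SMI lands
 * inside that window, the two TSC reads end up more than SMI_TRESHOLD
 * cycles apart and the sample is retried; after MAX_RETRIES disturbed
 * attempts the ULLONG_MAX return tells the caller to discard the sample:
 *
 *	u64 ref;
 *	u64 tsc = tsc_read_refs(&ref, is_hpet_enabled());
 *
 *	if (tsc == ULLONG_MAX)
 *		(all attempts disturbed, skip this sample)
 */
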
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
        u64 tmp;

        if (hpet2 < hpet1)
                hpet2 += 0x100000000ULL;
        hpet2 -= hpet1;
        tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
        do_div(tmp, 1000000);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
        u64 tmp;

        if (!pm1 && !pm2)
                return ULONG_MAX;

        if (pm2 < pm1)
                pm2 += (u64)ACPI_PM_OVRRUN;
        pm2 -= pm1;
        tmp = pm2 * 1000000000LL;
        do_div(tmp, PMTMR_TICKS_PER_SEC);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}

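/*
 * Worked example (editorial note, not part of the original file): the
 * caller pre-multiplies the raw TSC delta by 10^6 before passing it in
 * as deltatsc. For a hypothetical 2 GHz CPU sampled over ~50ms of PM
 * timer time, with PMTMR_TICKS_PER_SEC = 3579545:
 *
 *	pm2 - pm1 = 178977 ticks
 *	tmp       = 178977 * 10^9 / 3579545  ~= 5.0 * 10^7  (elapsed ns)
 *	deltatsc  = 10^8 cycles * 10^6       =  10^14
 *	result    = 10^14 / (5.0 * 10^7)     ~= 2000000 kHz
 */
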
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000

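/*
 * Worked arithmetic (editorial note, not part of the original file):
 * with PIT_TICK_RATE = 1193182 Hz, the latches above evaluate to
 *
 *	CAL_LATCH  = 1193182 / (1000 / 10) = 11931   (~10ms countdown)
 *	CAL2_LATCH = 1193182 / (1000 / 50) = 59659   (~50ms countdown)
 *
 * The longer second set is only used when the fast 10ms run keeps
 * failing, e.g. under virtualization.
 */
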
/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
        u64 tsc, t1, t2, delta;
        unsigned long tscmin, tscmax;
        int pitcnt;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Setup CTC channel 2 for mode 0 (interrupt on terminal
         * count mode), binary count. Set the latch register
         * (LSB then MSB) to begin countdown.
         */
        outb(0xb0, 0x43);
        outb(latch & 0xff, 0x42);
        outb(latch >> 8, 0x42);

        tsc = t1 = t2 = get_cycles();

        pitcnt = 0;
        tscmax = 0;
        tscmin = ULONG_MAX;
        while ((inb(0x61) & 0x20) == 0) {
                t2 = get_cycles();
                delta = t2 - tsc;
                tsc = t2;
                if ((unsigned long) delta < tscmin)
                        tscmin = (unsigned int) delta;
                if ((unsigned long) delta > tscmax)
                        tscmax = (unsigned int) delta;
                pitcnt++;
        }

        /*
         * Sanity checks:
         *
         * If we were not able to read the PIT more than loopmin
         * times, then we have been hit by a massive SMI
         *
         * If the maximum is 10 times larger than the minimum,
         * then we got hit by an SMI as well.
         */
        if (pitcnt < loopmin || tscmax > 10 * tscmin)
                return ULONG_MAX;

        /* Calculate the TSC rate: elapsed cycles / elapsed ms = kHz */
        delta = t2 - t1;
        do_div(delta, ms);
        return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
        /* Ignore LSB */
        inb(0x42);
        return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
        int count;
        u64 tsc = 0, prev_tsc = 0;

        for (count = 0; count < 50000; count++) {
                if (!pit_verify_msb(val))
                        break;
                prev_tsc = tsc;
                tsc = get_cycles();
        }
        *deltap = get_cycles() - prev_tsc;
        *tscp = tsc;

        /*
         * We require _some_ success, but the quality control
         * will be based on the error terms on the TSC values.
         */
        return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

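/*
 * Worked arithmetic (editorial note, not part of the original file):
 * one MSB step is 256 PIT ticks, so with PIT_TICK_RATE = 1193182 Hz
 *
 *	MAX_QUICK_PIT_ITERATIONS = 50 * 1193182 / 1000 / 256 = 233
 *
 * i.e. at most 233 observed MSB decrements (~50ms) before giving up.
 */
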
static unsigned long quick_pit_calibrate(void)
{
        int i;
        u64 tsc, delta;
        unsigned long d1, d2;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Counter 2, mode 0 (one-shot), binary count
         *
         * NOTE! Mode 2 decrements by two (and then the
         * output is flipped each time, giving the same
         * final output frequency as a decrement-by-one),
         * so mode 0 is much better when looking at the
         * individual counts.
         */
        outb(0xb0, 0x43);

        /* Start at 0xffff */
        outb(0xff, 0x42);
        outb(0xff, 0x42);

        /*
         * The PIT starts counting at the next edge, so we
         * need to delay for a microsecond. The easiest way
         * to do that is to just read back the 16-bit counter
         * once from the PIT.
         */
        pit_verify_msb(0);

        if (pit_expect_msb(0xff, &tsc, &d1)) {
                for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
                        if (!pit_expect_msb(0xff-i, &delta, &d2))
                                break;

                        /*
                         * Iterate until the error is less than 500 ppm
                         */
                        delta -= tsc;
                        if (d1+d2 >= delta >> 11)
                                continue;

                        /*
                         * Check the PIT one more time to verify that
                         * all TSC reads were stable wrt the PIT.
                         *
                         * This also guarantees serialization of the
                         * last cycle read ('d2') in pit_expect_msb.
                         */
                        if (!pit_verify_msb(0xfe - i))
                                break;
                        goto success;
                }
        }
        pr_err("Fast TSC calibration failed\n");
        return 0;

success:
        /*
         * Ok, if we get here, then we've seen the
         * MSB of the PIT decrement 'i' times, and the
         * error has shrunk to less than 500 ppm.
         *
         * As a result, we can depend on there not being
         * any odd delays anywhere, and the TSC reads are
         * reliable (within the error).
         *
         * kHz = ticks / time-in-seconds / 1000;
         * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
         * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
         */
        delta *= PIT_TICK_RATE;
        do_div(delta, i*256*1000);
        pr_info("Fast TSC calibration using PIT\n");
        return delta;
}

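/*
 * Worked example (editorial note, not part of the original file): the
 * 'd1+d2 >= delta >> 11' test above accepts once the combined read
 * uncertainty drops below 1/2048 (~488 ppm) of the elapsed cycles. If
 * we then exit at, say, i = 20 with delta = 8582804 cycles on a
 * hypothetical 2 GHz CPU:
 *
 *	elapsed time = 20 * 256 / 1193182 s               ~= 4.29 ms
 *	kHz          = 8582804 * 1193182 / (20*256*1000)  ~= 2000000
 */
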
/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
        unsigned long flags, latch, ms, fast_calibrate;
        int hpet = is_hpet_enabled(), i, loopmin;

        local_irq_save(flags);
        fast_calibrate = quick_pit_calibrate();
        local_irq_restore(flags);
        if (fast_calibrate)
                return fast_calibrate;

        /*
         * Run the calibration loop up to 3 times to get the lowest
         * frequency value (the best estimate). We use two different
         * calibration modes here:
         *
         * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
         * load the selected timeout (10ms at first, 50ms on retry).
         * We read the time right after we started the timer and wait
         * until the PIT count down reaches zero. In each wait loop
         * iteration we read the TSC and check the delta to the
         * previous read. We keep track of the min and max values of
         * that delta. The delta is mostly defined by the IO time of
         * the PIT access, so we can detect when an SMI/SMM
         * disturbance happened between the two reads. If the
         * maximum time is significantly larger than the minimum time,
         * then we discard the result and have another try.
         *
         * 2) Reference counter. If available we use the HPET or the
         * PMTIMER as a reference to check the sanity of that value.
         * We use separate TSC readouts and check inside of the
         * reference read for an SMI/SMM disturbance. We discard
         * disturbed values here as well. We do that around the PIT
         * calibration delay loop as we have to wait for a certain
         * amount of time anyway.
         */

        /* Preset PIT loop values */
        latch = CAL_LATCH;
        ms = CAL_MS;
        loopmin = CAL_PIT_LOOPS;

        for (i = 0; i < 3; i++) {
                unsigned long tsc_pit_khz;

                /*
                 * Read the start value and the reference count of
                 * hpet/pmtimer when available. Then do the PIT
                 * calibration, which runs for at least 'ms'
                 * milliseconds, and read the end value.
                 */
                local_irq_save(flags);
                tsc1 = tsc_read_refs(&ref1, hpet);
                tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
                tsc2 = tsc_read_refs(&ref2, hpet);
                local_irq_restore(flags);

                /* Pick the lowest PIT TSC calibration so far */
                tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

                /* hpet or pmtimer available ? */
                if (ref1 == ref2)
                        continue;

                /* Check whether the sampling was disturbed by an SMI */
                if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
                        continue;

                tsc2 = (tsc2 - tsc1) * 1000000LL;
                if (hpet)
                        tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
                else
                        tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

                tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

                /* Check the reference deviation */
                delta = ((u64) tsc_pit_min) * 100;
                do_div(delta, tsc_ref_min);

                /*
                 * If both calibration results are inside a 10% window
                 * then we can be sure that the calibration succeeded.
                 * We break out of the loop right away. We use the
                 * reference value, as it is more precise.
                 */
                if (delta >= 90 && delta <= 110) {
                        pr_info("PIT calibration matches %s. %d loops\n",
                                hpet ? "HPET" : "PMTIMER", i + 1);
                        return tsc_ref_min;
                }

                /*
                 * Check whether PIT failed more than once. This
                 * happens in virtualized environments. We need to
                 * give the virtual PC a slightly longer timeframe for
                 * the HPET/PMTIMER to make the result precise.
                 */
                if (i == 1 && tsc_pit_min == ULONG_MAX) {
                        latch = CAL2_LATCH;
                        ms = CAL2_MS;
                        loopmin = CAL2_PIT_LOOPS;
                }
        }

        /*
         * Now check the results.
         */
        if (tsc_pit_min == ULONG_MAX) {
                /* PIT gave no useful value */
                pr_warn("Unable to calibrate against PIT\n");

                /* We don't have an alternative source, disable TSC */
                if (!hpet && !ref1 && !ref2) {
                        pr_notice("No reference (HPET/PMTIMER) available\n");
                        return 0;
                }

                /* The alternative source failed as well, disable TSC */
                if (tsc_ref_min == ULONG_MAX) {
                        pr_warn("HPET/PMTIMER calibration failed\n");
                        return 0;
                }

                /* Use the alternative source */
                pr_info("using %s reference calibration\n",
                        hpet ? "HPET" : "PMTIMER");

                return tsc_ref_min;
        }

        /* We don't have an alternative source, use the PIT calibration value */
        if (!hpet && !ref1 && !ref2) {
                pr_info("Using PIT calibration value\n");
                return tsc_pit_min;
        }

        /* The alternative source failed, use the PIT calibration value */
        if (tsc_ref_min == ULONG_MAX) {
                pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
                return tsc_pit_min;
        }

        /*
         * The calibration values differ too much. In doubt, we use
         * the PIT value as we know that there are PMTIMERs around
         * running at double speed. At least we let the user know:
         */
        pr_warn("PIT calibration deviates from %s: %lu %lu\n",
                hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
        pr_info("Using PIT calibration value\n");
        return tsc_pit_min;
}
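
/*
 * Worked example (editorial note, not part of the original file): the
 * 10% cross-check above computes delta = tsc_pit_min * 100 / tsc_ref_min.
 * For a hypothetical tsc_pit_min = 1998000 kHz against a reference of
 * tsc_ref_min = 2000000 kHz, delta = 99, which lies inside [90, 110],
 * so the more precise HPET/PMTIMER value is returned immediately.
 */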

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                tsc_khz = x86_platform.calibrate_tsc();
                cpu_khz = tsc_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
        if (!sched_clock_stable)
                return;

        cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * TSC to an arbitrary value (still sync'd across cpus) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for
 * each cpu so that sched_clock() continues from the point where it was
 * left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
        unsigned long long offset;
        unsigned long flags;
        int cpu;

        if (!sched_clock_stable)
                return;

        local_irq_save(flags);

        __this_cpu_write(cyc2ns_offset, 0);
        offset = cyc2ns_suspend - sched_clock();

        for_each_possible_cpu(cpu)
                per_cpu(cyc2ns_offset, cpu) = offset;

        local_irq_restore(flags);
}
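
/*
 * Worked example (editorial note, not part of the original file):
 * suppose sched_clock() read 100s just before suspend (cyc2ns_suspend)
 * and the TSC resets to zero across the sleep state. With this cpu's
 * offset zeroed, sched_clock() returns the raw cycles_2_ns() of the
 * fresh TSC, say 1ms; offset = 100s - 1ms is then written to every
 * cpu's cyc2ns_offset, so subsequent sched_clock() calls continue
 * from ~100s instead of jumping backwards.
 */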
#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC based timer when the cpu
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj;

        if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
                return 0;

        lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                tsc_khz_ref = tsc_khz;
        }
        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");
        }

        set_cyc2ns_scale(tsc_khz, freq->cpu);

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        if (!cpu_has_tsc)
                return 0;
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;
        cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                  CPUFREQ_TRANSITION_NOTIFIER);
        return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */
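
/*
 * Worked example (editorial note, not part of the original file):
 * cpufreq_scale() rescales linearly against the values saved on the
 * first notification. If tsc_khz_ref = 2000000 was recorded at
 * ref_freq = 2000000 kHz and the cpu switches to freq->new = 1000000,
 * tsc_khz becomes 2000000 * 1000000 / 2000000 = 1000000, and
 * set_cyc2ns_scale() recomputes the notifying cpu's scale to match.
 */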

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
        cycle_t ret = (cycle_t)get_cycles();

        return ret >= clocksource_tsc.cycle_last ?
                ret : clocksource_tsc.cycle_last;
}
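
/*
 * Worked example (editorial note, not part of the original file): the
 * clamp above matters because the timekeeping core computes elapsed
 * time as (now - cycle_last) in unsigned arithmetic. If a slightly
 * lagging CPU read ret = cycle_last - 100, the unsigned delta would
 * be 2^64 - 100 cycles, the forward jump "in the range of hours"
 * described above; returning cycle_last instead pins that one read.
 */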

static void resume_tsc(struct clocksource *cs)
{
        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.cycle_last = 0;
}

static struct clocksource clocksource_tsc = {
        .name		= "tsc",
        .rating		= 300,
        .read		= read_tsc,
        .resume		= resume_tsc,
        .mask		= CLOCKSOURCE_MASK(64),
        .flags		= CLOCK_SOURCE_IS_CONTINUOUS |
                          CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
        .archdata	= { .vclock_mode = VCLOCK_TSC },
#endif
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                sched_clock_stable = 0;
                disable_sched_clock_irqtime();
                pr_info("Marking TSC unstable due to %s\n", reason);
                /* Change only the rating when not registered */
                if (clocksource_tsc.mult)
                        clocksource_mark_unstable(&clocksource_tsc);
                else {
                        clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
                        clocksource_tsc.rating = 0;
                }
        }
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
        /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        /* Geode_LX - the OLPC CPU has a very reliable TSC */
        if (res_low & RTSC_SUSP)
                tsc_clocksource_reliable = 1;
#endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

#ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
#endif

        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        if (tsc_clocksource_reliable)
                return 0;
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        return 1;
        }

        return 0;
}


static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a second
 * to further refine the TSC freq value. Since this is timer based,
 * instead of loop based, we don't block the boot process while this
 * longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the fast
 * early calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
        static u64 tsc_start = -1, ref_start;
        static int hpet;
        u64 tsc_stop, ref_stop, delta;
        unsigned long freq;

        /* Don't bother refining TSC on unstable systems */
        if (check_tsc_unstable())
                goto out;

        /*
         * Since the work is started early in boot, we may be
         * delayed the first time we expire. So set the workqueue
         * again once we know timers are working.
         */
        if (tsc_start == -1) {
                /*
                 * Only set hpet once, to avoid mixing hardware
                 * if the hpet becomes enabled later.
                 */
                hpet = is_hpet_enabled();
                schedule_delayed_work(&tsc_irqwork, HZ);
                tsc_start = tsc_read_refs(&ref_start, hpet);
                return;
        }

        tsc_stop = tsc_read_refs(&ref_stop, hpet);

        /* hpet or pmtimer available ? */
        if (ref_start == ref_stop)
                goto out;

        /* Check whether the sampling was disturbed by an SMI */
        if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
                goto out;

        delta = tsc_stop - tsc_start;
        delta *= 1000000LL;
        if (hpet)
                freq = calc_hpet_ref(delta, ref_start, ref_stop);
        else
                freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

        /* Make sure we're within 1% */
        if (abs(tsc_khz - freq) > tsc_khz/100)
                goto out;

        tsc_khz = freq;
        pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
                (unsigned long)tsc_khz / 1000,
                (unsigned long)tsc_khz % 1000);

out:
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
}

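/*
 * Worked example (editorial note, not part of the original file): the
 * 1% guard above keeps the early calibration when the refined result
 * looks implausible. With an early tsc_khz = 2000000, any refined freq
 * outside 2000000 +/- 20000 kHz is thrown away; a refined 2000113 kHz,
 * by contrast, is accepted and printed as "2000.113 MHz".
 */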

static int __init init_tsc_clocksource(void)
{
        if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
                return 0;

        if (tsc_clocksource_reliable)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }

        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

        /*
         * Trust the results of the earlier calibration on systems
         * exporting a reliable TSC.
         */
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
                return 0;
        }

        schedule_delayed_work(&tsc_irqwork, 0);
        return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
        u64 lpj;
        int cpu;

        x86_init.timers.tsc_pre_init();

        if (!cpu_has_tsc)
                return;

        tsc_khz = x86_platform.calibrate_tsc();
        cpu_khz = tsc_khz;

        if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                return;
        }

        pr_info("Detected %lu.%03lu MHz processor\n",
                (unsigned long)cpu_khz / 1000,
                (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu)
                set_cyc2ns_scale(cpu_khz, cpu);

        if (tsc_disabled > 0)
                return;

        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;

        if (!no_sched_irq_time)
                enable_sched_clock_irqtime();

        lpj = ((u64)tsc_khz * 1000);
        do_div(lpj, HZ);
        lpj_fine = lpj;

        use_tsc_delay();

        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");

        check_system_tsc_reliable();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has
 * already been calibrated. This assumes that CONSTANT_TSC applies to
 * all cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
        int i, cpu = smp_processor_id();

        if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
                return 0;

        for_each_online_cpu(i)
                if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
                        return cpu_data(i).loops_per_jiffy;
        return 0;
}
#endif