/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c,
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;

int tsc_disable;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
	       "cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;

	return 1;
}
#endif

__setup("notsc", tsc_setup);
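
/*
 * Usage note (illustrative): __setup() registers tsc_setup() as the
 * handler for a "notsc" token on the kernel command line. On kernels
 * built without CONFIG_X86_TSC, booting with "notsc" sets tsc_disable;
 * with CONFIG_X86_TSC the option only prints the warning above and is
 * otherwise ignored.
 */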

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}
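
/*
 * A minimal sketch (not an authoritative copy) of the consumer of
 * cyc2ns_scale; the real cycles_2_ns() helper is provided by the
 * <asm/timer.h> include above, and per the math it amounts to:
 *
 *	static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 *	{
 *		return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
 *	}
 *
 * Worked example: at cpu_khz = 2000000 (a 2 GHz CPU),
 * cyc2ns_scale = (10^6 << 10) / 2000000 = 512, so 1000 cycles become
 * (1000 * 512) >> 10 = 500 ns, i.e. 0.5 ns per cycle as expected.
 */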

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
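
/*
 * Granularity note (illustrative): with HZ = 250, the jiffies fallback
 * above only advances in 1000000000 / 250 = 4000000 ns (4 ms) steps,
 * which is why the raw TSC path is preferred whenever it is usable.
 */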

/* We need to define a real function for sched_clock, to override the
 * weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits.
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL << 32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC / 2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
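
/*
 * Calibration arithmetic, for clarity: mach_countup() spins for a fixed
 * PIT-timed window of CALIBRATE_TIME_MSEC milliseconds, so delta64 is
 * the number of TSC cycles elapsed in that window, and cycles per
 * millisecond is exactly the frequency in kHz; hence the do_div above.
 * Illustrative numbers, assuming a 30 ms window: a 2 GHz TSC advances
 * about 60000000 cycles in 30 ms, and 60000000 / 30 = 2000000 kHz.
 */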

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
				      cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
							ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}
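
/*
 * Scaling note (illustrative): cpufreq_scale(old, ref, new) computes
 * old * new / ref. E.g. rescaling loops_per_jiffy_ref = 4000000 from a
 * ref_freq of 1000000 kHz to a new frequency of 2000000 kHz yields
 * 8000000, keeping TSC-based udelay() roughly calibrated across the
 * frequency transition.
 */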

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be set */
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
};
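
/*
 * mult/shift arithmetic, for clarity: the clocksource core converts
 * cycles to nanoseconds as ns = (cycles * mult) >> shift, and tsc_init()
 * below fills in mult = clocksource_khz2mult(tsc_khz, shift), i.e.
 * roughly (10^6 << shift) / tsc_khz. Worked example for a 2 GHz TSC
 * with shift = 22: mult = (10^6 << 22) / 2000000 = 2097152, so one
 * cycle corresponds to 2097152 / 2^22 = 0.5 ns (the shift is applied
 * after the multiply, so the fraction is preserved across many cycles).
 */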

void mark_tsc_unstable(char *reason)
{
	sched_clock_unstable_event();
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long val;

	rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
	if (val & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		goto out_no_tsc;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	} else
		tsc_enabled = 1;

	clocksource_register(&clocksource_tsc);

	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support; this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}