x86: merge sched_clock handling
arch/x86/kernel/tsc_32.c
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

extern int tsc_unstable;
extern int tsc_disabled;

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of a mhz one for better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
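
/*
 * Worked example (a sketch, not in the original file; it assumes SC is
 * 2^10, i.e. CYC2NS_SCALE_FACTOR == 10 as the limit above implies, and
 * that __cycles_2_ns() from <asm/timer.h> applies the per-CPU scale as
 * (cyc * cyc2ns) >> CYC2NS_SCALE_FACTOR):
 *
 *	cpu_khz = 2,000,000		(a 2 GHz CPU)
 *	cyc2ns  = (10^6 << 10) / 2,000,000 = 512
 *	ns      = (cycles * 512) >> 10 = cycles * 0.5
 *
 * which matches the expected 0.5 ns per TSC cycle at 2 GHz.
 */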

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

	/*
	 * Start smoothly with the new frequency:
	 */
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits.
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			continue;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}

	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL << 32))
		goto err;

	delta64 += CALIBRATE_TIME_MSEC / 2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
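
/*
 * Worked example of the calibration above (a sketch, not in the original
 * file; it assumes CALIBRATE_TIME_MSEC is 30, as mach_timer.h defined it
 * at the time): if the TSC advanced by roughly 60,000,000 cycles while
 * the CTC counted down its 30 ms calibration window, the rounded divide
 * yields 60,000,000 / 30 = 2,000,000 kHz, i.e. a 2 GHz CPU.
 */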

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
				      cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * If the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
							ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz, freq->cpu);
				/*
				 * TSC-based sched_clock turns
				 * to junk with cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}
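
/*
 * Sketch of the scaling above (not in the original file): cpufreq_scale(old,
 * ref, new) returns roughly old * new / ref, with overflow guarding. E.g.
 * with loops_per_jiffy_ref = 4,000,000 recorded at ref_freq = 2,000,000 kHz,
 * a transition to 1,000,000 kHz gives loops_per_jiffy = 2,000,000; cpu_khz
 * and tsc_khz are rescaled the same way.
 */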

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}
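
/*
 * Example of the clamp above (a sketch, not in the original file): if
 * clocksource_tsc.cycle_last is 1000 and this CPU's slightly-behind TSC
 * reads 998, read_tsc() returns 1000 instead, so the timekeeping core
 * never sees the wrapped (998 - 1000) delta and time does not jump
 * forward spuriously.
 */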

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};
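
/*
 * Sketch of the cycles->ns conversion the timekeeping core performs with
 * the mult/shift pair above (not in the original file; the
 * clocksource_khz2mult() formula is shown approximately):
 *
 *	mult ~= (10^6 << shift) / tsc_khz
 *
 * e.g. for tsc_khz = 2,000,000 and shift = 22:
 *	mult = (10^6 << 22) / 2,000,000 = 2,097,152
 *	ns   = (cycles * 2,097,152) >> 22 = cycles * 0.5
 *
 * again 0.5 ns per cycle for a 2 GHz TSC; tsc_init() below fills in .mult
 * via clocksource_khz2mult(tsc_khz, clocksource_tsc.shift).
 */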

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess whether the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi-socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP	0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
	int cpu;
	u64 lpj;

	if (!cpu_has_tsc || tsc_disabled > 0)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;
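
	/*
	 * Example (a sketch, not in the original file): with tsc_khz =
	 * 2,000,000 and HZ = 250, lpj = 2,000,000,000 / 250 = 8,000,000
	 * TSC cycles per jiffy, which the generic delay-loop calibration
	 * can pick up through lpj_fine instead of re-measuring it.
	 */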

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}