x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
[deliverable/linux.git] / arch / x86 / include / asm / tsc.h
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>

#define NS_SCALE	10 /* 2^10, carefully chosen */
#define US_SCALE	32 /* 2^32, arbitrarily chosen */

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

extern unsigned int cpu_khz;	/* CPU clock frequency in kHz */
extern unsigned int tsc_khz;	/* TSC tick rate in kHz */

extern void disable_TSC(void);
22 static inline cycles_t get_cycles(void)
23 {
24 unsigned long long ret = 0;
25
26 #ifndef CONFIG_X86_TSC
27 if (!cpu_has_tsc)
28 return 0;
29 #endif
30 rdtscll(ret);
31
32 return ret;
33 }
34
/*
 * VDSO-safe TSC read: avoids boot_cpu_data, which is not mapped
 * into the vsyscall/VDSO address space.
 */
static __always_inline cycles_t vget_cycles(void)
{
	/*
	 * We only do VDSOs on TSC capable CPUs, so this shouldn't
	 * access boot_cpu_data (which is not VDSO-safe):
	 */
#ifndef CONFIG_X86_TSC
	if (!cpu_has_tsc)
		return 0;
#endif
	return (cycles_t)__native_read_tsc();
}
47
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern unsigned long native_calibrate_tsc(void);

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);

extern int notsc_setup(char *);

/*
 * Save/restore of sched_clock state across suspend/resume, so
 * that cyc2ns offsets can be recomputed after the TSC stops or
 * resets in sleep states.
 */
extern void save_sched_clock_state(void);
extern void restore_sched_clock_state(void);

#endif /* _ASM_X86_TSC_H */
This page took 0.067165 seconds and 5 git commands to generate.