/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
#include <asm/mwait.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}
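
/*
 * Illustrative sketch (not part of the original file): stripped of the
 * alignment padding, the asm above behaves like the C below. The two
 * .align 16 jump targets only ensure the loop body sits at the same
 * alignment wherever the kernel places this function, so calibrated
 * delays stay comparable; the trailing "3: dec %0" has no visible
 * effect since the asm has no outputs.
 *
 *	static void delay_loop_sketch(unsigned long loops)
 *	{
 *		if (!loops)
 *			return;
 *		do {
 *			loops--;	// "2: dec %0 ; jnz 2b"
 *		} while (loops);
 *	}
 */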

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u64 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = rdtsc_ordered();
	for (;;) {
		now = rdtsc_ordered();
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = rdtsc_ordered();
		}
	}
	preempt_enable();
}

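/*
 * Worked example (illustrative, not part of the original file):
 * suppose loops = 1000 TSC ticks and the task migrates after 400
 * ticks have elapsed on the old CPU. The rebalance above computes
 *
 *	loops  -= (now - bclock);	// 1000 - 400 = 600 ticks remain
 *	bclock  = rdtsc_ordered();	// restart from the new CPU's TSC
 *
 * so the remaining 600 ticks are measured entirely against the new
 * CPU's counter, guaranteeing "at least" the requested delay even if
 * the two TSCs are not synchronized.
 */
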
/*
 * On some AMD platforms, MWAITX has a configurable 32-bit timer that
 * counts with TSC frequency. The timer is loaded with the input
 * value and MWAITX exits when it expires.
 */
static void delay_mwaitx(unsigned long __loops)
{
	u64 start, end, delay, loops = __loops;

	start = rdtsc_ordered();

	for (;;) {
		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);

		/*
		 * Use cpu_tss as a cacheline-aligned, seldom
		 * accessed per-cpu variable as the monitor target.
		 */
		__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);

		/*
		 * AMD, like Intel, supports the EAX hint; EAX=0xf
		 * means "do not enter any deep C-state", and we use
		 * it here in delay() to minimize wakeup latency.
		 */
		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);

		end = rdtsc_ordered();

		if (loops <= end - start)
			break;

		loops -= end - start;

		start = end;
	}
}

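/*
 * Illustrative note (not part of the original file): because the
 * MWAITX timer is only 32 bits wide, a request larger than
 * MWAITX_MAX_LOOPS is issued in chunks by the min_t() above. The TSC
 * is re-read after every MWAITX because stores to the monitored
 * cacheline or interrupts can wake MWAITX before the timer expires,
 * so only the actually elapsed ticks are subtracted from loops.
 */
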
/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	if (delay_fn == delay_loop)
		delay_fn = delay_tsc;
}

void use_mwaitx_delay(void)
{
	delay_fn = delay_mwaitx;
}

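/*
 * Note (illustrative, not part of the original file): the
 * delay_fn == delay_loop check in use_tsc_delay() prevents a later
 * TSC calibration from downgrading the delay method once
 * use_mwaitx_delay() has already installed delay_mwaitx.
 */
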
int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		*timer_val = rdtsc();
		return 0;
	}
	return -1;
}

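/*
 * Note (an assumption, not stated in this file): read_current_timer()
 * is consumed by the generic boot-time calibration code
 * (calibrate_delay_direct()); returning -1 when the TSC delay is not
 * in use tells it there is no constant-rate timer to calibrate
 * against, so it falls back to counting delay loops.
 */
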
void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

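/*
 * Worked arithmetic (illustrative, not part of the original file):
 * "mull %%edx" forms the 64-bit product of xloops * 4 and
 * loops_per_jiffy * HZ/4, and keeps the high 32 bits in %edx, i.e.
 * it divides by 2^32. For __udelay(n) below, xloops = n * 0x10c7, so
 *
 *	loops ~= n * 0x10c7 * loops_per_jiffy * HZ / 2^32
 *	      ~= n * loops_per_jiffy * HZ / 10^6
 *
 * which is n microseconds' worth of calibrated delay loops, since
 * 0x10c7 = 4295 ~= 2^32 / 10^6 and loops_per_jiffy * HZ is loops per
 * second. The final ++xloops rounds up so the delay is never short.
 */
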
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
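
/*
 * Usage sketch (illustrative, not part of this file; the register
 * names and offsets are hypothetical): drivers reach these functions
 * through the generic wrappers in <linux/delay.h>, e.g. to honor a
 * short hardware settling time:
 *
 *	#include <linux/delay.h>
 *
 *	writel(val, regs + MYDEV_CTRL);	// hypothetical device register
 *	udelay(10);			// busy-wait ~10us for settling
 *	status = readl(regs + MYDEV_STATUS);
 */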