Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Precise Delay Loops for i386 | |
3 | * | |
4 | * Copyright (C) 1993 Linus Torvalds | |
5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | |
e01b70ef | 6 | * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com> |
1da177e4 LT |
7 | * |
8 | * The __delay function must _NOT_ be inlined as its execution time | |
9 | * depends wildly on alignment on many x86 processors. The additional | |
10 | * jump magic is needed to get the timing stable on all the CPU's | |
11 | * we have to worry about. | |
12 | */ | |
13 | ||
6f84fa2f | 14 | #include <linux/module.h> |
1da177e4 | 15 | #include <linux/sched.h> |
941e492b | 16 | #include <linux/timex.h> |
35d5d08a | 17 | #include <linux/preempt.h> |
1da177e4 | 18 | #include <linux/delay.h> |
6f84fa2f | 19 | |
1da177e4 LT |
20 | #include <asm/processor.h> |
21 | #include <asm/delay.h> | |
22 | #include <asm/timer.h> | |
23 | ||
24 | #ifdef CONFIG_SMP | |
6f84fa2f | 25 | # include <asm/smp.h> |
1da177e4 LT |
26 | #endif |
27 | ||
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	/*
	 * Busy-wait by decrementing %eax 'loops' times.
	 *
	 * The jmp chain plus the ".align 16" padding pins the decrement
	 * loop to a 16-byte boundary; per the header comment of this
	 * file, execution time varies wildly with alignment on many
	 * x86 CPUs, so this layout keeps the calibrated timing stable.
	 *
	 * loops == 0 is handled explicitly: test/jz skips straight to
	 * label 3 (the trailing dec is harmless there — the register
	 * is dead afterwards).
	 */
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}
48 | ||
/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	/*
	 * Busy-wait until the TSC has advanced by at least '__loops'
	 * cycles.  Widened to u64 so the (now - bclock) subtraction is
	 * well-defined across the full counter range.
	 */
	u64 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	/* rdtsc_barrier() keeps the TSC read from being reordered
	 * around surrounding instructions (speculative early reads). */
	rdtsc_barrier();
	bclock = rdtsc();
	for (;;) {
		rdtsc_barrier();
		now = rdtsc();
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			/* Credit the time already waited on the old CPU,
			 * then restart the measurement on the new one. */
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtsc_barrier();
			bclock = rdtsc();
		}
	}
	preempt_enable();
}
88 | ||
/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 *
 * Starts as the loop-based delay; use_tsc_delay() switches it to
 * the TSC-based implementation once the TSC is usable.
 */
static void (*delay_fn)(unsigned long) = delay_loop;
94 | ||
/*
 * Switch __delay() from the boot-time loop-based implementation to
 * the TSC-based one.  NOTE(review): expected to be called exactly
 * once during boot, after TSC calibration — caller is outside this
 * file, confirm there.
 */
void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}
99 | ||
a18e3690 | 100 | int read_current_timer(unsigned long *timer_val) |
6f84fa2f | 101 | { |
102 | if (delay_fn == delay_tsc) { | |
4ea1636b | 103 | *timer_val = rdtsc(); |
6f84fa2f | 104 | return 0; |
105 | } | |
106 | return -1; | |
107 | } | |
1da177e4 LT |
108 | |
/*
 * __delay - busy-wait for 'loops' delay units.
 * @loops: loop iterations (loop-based) or TSC cycles (TSC-based),
 *         depending on which implementation delay_fn points at.
 *
 * Must _NOT_ be inlined: the loop-based variant's timing depends on
 * code alignment (see the header comment of this file).
 */
void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);
1da177e4 LT |
114 | |
/*
 * __const_udelay - delay scaled by the calibrated loops_per_jiffy.
 * @xloops: requested delay in 2**32-based fixed-point units (see
 *          the scale constants in __udelay / __ndelay).
 *
 * Computes loops = (xloops * loops_per_jiffy * HZ) >> 32 with a
 * 32x32->64 multiply, then busy-waits that many loops.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	/*
	 * Fold HZ into the operands as (xloops * 4) * (lpj * HZ/4) so
	 * the second factor still fits in 32 bits.  "mull %%edx"
	 * multiplies %eax by %edx; the high 32 bits of the product
	 * land in %edx ("=d"), which is exactly the >> 32.
	 */
	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	/* +1 rounds up so we never wait less than requested. */
	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
1da177e4 LT |
128 | |
/*
 * __udelay - busy-wait for at least 'usecs' microseconds.
 * @usecs: delay in microseconds.
 */
void __udelay(unsigned long usecs)
{
	/*
	 * 0x000010c7 == 2**32 / 10**6 rounded up: converts microseconds
	 * into the 2**32-based fixed-point units __const_udelay() takes.
	 */
	const unsigned long usecs_to_xloops = 0x000010c7;

	__const_udelay(usecs * usecs_to_xloops);
}
EXPORT_SYMBOL(__udelay);
1da177e4 LT |
134 | |
/*
 * __ndelay - busy-wait for at least 'nsecs' nanoseconds.
 * @nsecs: delay in nanoseconds.
 */
void __ndelay(unsigned long nsecs)
{
	/*
	 * 0x00005 == 2**32 / 10**9 rounded up: converts nanoseconds
	 * into the 2**32-based fixed-point units __const_udelay() takes.
	 */
	const unsigned long nsecs_to_xloops = 0x00005;

	__const_udelay(nsecs * nsecs_to_xloops);
}
EXPORT_SYMBOL(__ndelay);