Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Precise Delay Loops for i386 | |
3 | * | |
4 | * Copyright (C) 1993 Linus Torvalds | |
5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | |
e01b70ef | 6 | * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com> |
1da177e4 LT |
7 | * |
8 | * The __delay function must _NOT_ be inlined as its execution time | |
9 | * depends wildly on alignment on many x86 processors. The additional | |
10 | * jump magic is needed to get the timing stable on all the CPU's | |
11 | * we have to worry about. | |
12 | */ | |
13 | ||
6f84fa2f | 14 | #include <linux/module.h> |
1da177e4 | 15 | #include <linux/sched.h> |
941e492b | 16 | #include <linux/timex.h> |
35d5d08a | 17 | #include <linux/preempt.h> |
1da177e4 | 18 | #include <linux/delay.h> |
6f84fa2f | 19 | |
1da177e4 LT |
20 | #include <asm/processor.h> |
21 | #include <asm/delay.h> | |
22 | #include <asm/timer.h> | |
23 | ||
24 | #ifdef CONFIG_SMP | |
6f84fa2f | 25 | # include <asm/smp.h> |
1da177e4 LT |
26 | #endif |
27 | ||
6f84fa2f | 28 | /* simple loop based delay: */ |
29 | static void delay_loop(unsigned long loops) | |
30 | { | |
f0fbf0ab | 31 | asm volatile( |
e01b70ef JH |
32 | " test %0,%0 \n" |
33 | " jz 3f \n" | |
34 | " jmp 1f \n" | |
35 | ||
36 | ".align 16 \n" | |
37 | "1: jmp 2f \n" | |
38 | ||
39 | ".align 16 \n" | |
ff1b15b6 | 40 | "2: dec %0 \n" |
e01b70ef | 41 | " jnz 2b \n" |
ff1b15b6 | 42 | "3: dec %0 \n" |
e01b70ef JH |
43 | |
44 | : /* we don't need output */ | |
45 | :"a" (loops) | |
46 | ); | |
6f84fa2f | 47 | } |
48 | ||
/*
 * TSC based delay:
 *
 * Busy-wait until the TSC has advanced by at least @__loops cycles.
 * Preemption is enabled around rep_nop() so RT tasks can run while we
 * spin; if that window migrated us to another CPU (whose TSC is not
 * comparable to the old one), the cycles already waited are subtracted
 * from @loops and the baseline is re-read on the new CPU, so the total
 * wait is always "at least" the requested amount.
 */
static void delay_tsc(unsigned long __loops)
{
	u64 bclock, now, loops = __loops;
	int cpu;

	/* Pin the TSC baseline to one CPU while we compare against it. */
	preempt_disable();
	cpu = smp_processor_id();
	bclock = rdtsc_ordered();
	for (;;) {
		now = rdtsc_ordered();
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = rdtsc_ordered();
		}
	}
	preempt_enable();
}
85 | ||
/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

/* Switch __delay() from the calibration loop to the TSC implementation. */
void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}
96 | ||
a18e3690 | 97 | int read_current_timer(unsigned long *timer_val) |
6f84fa2f | 98 | { |
99 | if (delay_fn == delay_tsc) { | |
4ea1636b | 100 | *timer_val = rdtsc(); |
6f84fa2f | 101 | return 0; |
102 | } | |
103 | return -1; | |
104 | } | |
1da177e4 LT |
105 | |
/*
 * __delay - busy-wait for @loops delay-loop units.
 *
 * Dispatches through delay_fn: delay_loop until boot-time calibration
 * installs delay_tsc via use_tsc_delay().
 */
void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);
1da177e4 LT |
111 | |
/*
 * __const_udelay - delay by a 2**32-scaled fixed-point loop count.
 * @xloops: requested delay scaled by 2**32 (see the constants used by
 *          __udelay()/__ndelay()).
 *
 * Computes loops = (xloops * loops_per_jiffy * HZ) >> 32.  The ">> 32"
 * comes for free: "mull" leaves the high 32 bits of the 32x32->64
 * product in %edx, which the "=d" output captures back into xloops.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	/* *4 here pairs with the (HZ/4) factor below. */
	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	/* +1 rounds up so we never delay less than requested. */
	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
1da177e4 LT |
125 | |
/*
 * __udelay - busy-wait for @usecs microseconds.
 *
 * 0x10c7 is 2**32 / 10**6 rounded up, so the scaled value handed to
 * __const_udelay() never undershoots the requested delay.
 */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x10c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
1da177e4 LT |
131 | |
/*
 * __ndelay - busy-wait for @nsecs nanoseconds.
 *
 * 5 is 2**32 / 10**9 rounded up; rounding up guarantees at least the
 * requested delay, at the cost of slight overshoot.
 */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 5); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);