| 1 | /* |
| 2 | * Precise Delay Loops for i386 |
| 3 | * |
| 4 | * Copyright (C) 1993 Linus Torvalds |
| 5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> |
| 6 | * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com> |
| 7 | * |
| 8 | * The __delay function must _NOT_ be inlined as its execution time |
| 9 | * depends wildly on alignment on many x86 processors. The additional |
 * jump magic is needed to get the timing stable on all the CPUs
| 11 | * we have to worry about. |
| 12 | */ |
| 13 | |
| 14 | #include <linux/module.h> |
| 15 | #include <linux/sched.h> |
| 16 | #include <linux/timex.h> |
| 17 | #include <linux/preempt.h> |
| 18 | #include <linux/delay.h> |
| 19 | |
| 20 | #include <asm/processor.h> |
| 21 | #include <asm/delay.h> |
| 22 | #include <asm/timer.h> |
| 23 | #include <asm/mwait.h> |
| 24 | |
| 25 | #ifdef CONFIG_SMP |
| 26 | # include <asm/smp.h> |
| 27 | #endif |
| 28 | |
/* simple loop-based delay: */
| 30 | static void delay_loop(unsigned long loops) |
| 31 | { |
| 32 | asm volatile( |
| 33 | " test %0,%0 \n" |
| 34 | " jz 3f \n" |
| 35 | " jmp 1f \n" |
| 36 | |
| 37 | ".align 16 \n" |
| 38 | "1: jmp 2f \n" |
| 39 | |
| 40 | ".align 16 \n" |
| 41 | "2: dec %0 \n" |
| 42 | " jnz 2b \n" |
| 43 | "3: dec %0 \n" |
| 44 | |
		: "+a" (loops)	/* "dec %0" modifies it, so it must be in/out */
		:
| 47 | ); |
| 48 | } |
| 49 | |
/* TSC-based delay: */
| 51 | static void delay_tsc(unsigned long __loops) |
| 52 | { |
| 53 | u64 bclock, now, loops = __loops; |
| 54 | int cpu; |
| 55 | |
| 56 | preempt_disable(); |
| 57 | cpu = smp_processor_id(); |
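	/*
	 * rdtsc_ordered() rather than plain rdtsc(): the ordered variant
	 * includes a barrier, so the TSC read cannot be speculatively
	 * hoisted and the measured interval cannot come out too short.
	 */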
| 58 | bclock = rdtsc_ordered(); |
| 59 | for (;;) { |
| 60 | now = rdtsc_ordered(); |
| 61 | if ((now - bclock) >= loops) |
| 62 | break; |
| 63 | |
		/*
		 * Allow RT tasks to run: briefly reenable preemption
		 * around rep_nop(), i.e. the PAUSE instruction.
		 */
| 65 | preempt_enable(); |
| 66 | rep_nop(); |
| 67 | preempt_disable(); |
| 68 | |
| 69 | /* |
| 70 | * It is possible that we moved to another CPU, and |
| 71 | * since TSC's are per-cpu we need to calculate |
| 72 | * that. The delay must guarantee that we wait "at |
| 73 | * least" the amount of time. Being moved to another |
| 74 | * CPU could make the wait longer but we just need to |
| 75 | * make sure we waited long enough. Rebalance the |
| 76 | * counter for this CPU. |
| 77 | */ |
| 78 | if (unlikely(cpu != smp_processor_id())) { |
| 79 | loops -= (now - bclock); |
| 80 | cpu = smp_processor_id(); |
| 81 | bclock = rdtsc_ordered(); |
| 82 | } |
| 83 | } |
| 84 | preempt_enable(); |
| 85 | } |
| 86 | |
| 87 | /* |
 * On some AMD platforms, MWAITX has a configurable 32-bit timer that
 * counts at TSC frequency. The input value is the number of timer
 * ticks to wait; MWAITX exits when the timer expires.
| 91 | */ |
| 92 | static void delay_mwaitx(unsigned long __loops) |
| 93 | { |
| 94 | u64 start, end, delay, loops = __loops; |
| 95 | |
| 96 | start = rdtsc_ordered(); |
| 97 | |
| 98 | for (;;) { |
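		/*
		 * The MWAITX timer is only 32 bits wide, so waits longer
		 * than MWAITX_MAX_LOOPS are split into chunks; the timer
		 * is re-armed on each iteration until the full budget of
		 * TSC cycles has elapsed.
		 */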
| 99 | delay = min_t(u64, MWAITX_MAX_LOOPS, loops); |
| 100 | |
| 101 | /* |
		 * Use cpu_tss, a cacheline-aligned and seldom-accessed
		 * per-cpu variable, as the monitor target.
| 104 | */ |
| 105 | __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); |
| 106 | |
| 107 | /* |
		 * AMD, like Intel, supports the EAX hint; EAX=0xf means
		 * "do not enter any deep C-state". We use it here in
		 * delay() to minimize wakeup latency.
| 111 | */ |
| 112 | __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE); |
| 113 | |
| 114 | end = rdtsc_ordered(); |
| 115 | |
| 116 | if (loops <= end - start) |
| 117 | break; |
| 118 | |
| 119 | loops -= end - start; |
| 120 | |
| 121 | start = end; |
| 122 | } |
| 123 | } |
| 124 | |
| 125 | /* |
 * Since we calibrate only once at boot, this function pointer should be
 * set once at boot and not changed afterwards: delay_loop is the default
 * until the TSC is calibrated and use_tsc_delay() is called.
| 128 | */ |
| 129 | static void (*delay_fn)(unsigned long) = delay_loop; |
| 130 | |
| 131 | void use_tsc_delay(void) |
| 132 | { |
| 133 | if (delay_fn == delay_loop) |
| 134 | delay_fn = delay_tsc; |
| 135 | } |
| 136 | |
| 137 | void use_mwaitx_delay(void) |
| 138 | { |
| 139 | delay_fn = delay_mwaitx; |
| 140 | } |
| 141 | |
| 142 | int read_current_timer(unsigned long *timer_val) |
| 143 | { |
| 144 | if (delay_fn == delay_tsc) { |
| 145 | *timer_val = rdtsc(); |
| 146 | return 0; |
| 147 | } |
| 148 | return -1; |
| 149 | } |
| 150 | |
| 151 | void __delay(unsigned long loops) |
| 152 | { |
| 153 | delay_fn(loops); |
| 154 | } |
| 155 | EXPORT_SYMBOL(__delay); |
| 156 | |
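/*
 * A sketch of the fixed-point arithmetic below: "mull %edx" leaves the
 * high 32 bits of the 64-bit product in %edx, so with
 *
 *	xloops ~= n * (2^32 / 10^6)		(see __udelay() below)
 *
 * this computes
 *
 *	loops = (n * (2^32 / 10^6) * lpj * HZ) >> 32
 *	      ~= n * lpj * HZ / 10^6
 *
 * i.e. the number of delay loops in n microseconds, without needing a
 * 64-bit division. The "xloops *= 4" / "HZ/4" pair keeps lpj * HZ
 * within 32 bits, and the "++xloops" rounds up so the delay is never
 * shorter than requested.
 */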
| 157 | inline void __const_udelay(unsigned long xloops) |
| 158 | { |
| 159 | int d0; |
| 160 | |
| 161 | xloops *= 4; |
| 162 | asm("mull %%edx" |
| 163 | :"=d" (xloops), "=&a" (d0) |
| 164 | :"1" (xloops), "0" |
| 165 | (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4))); |
| 166 | |
| 167 | __delay(++xloops); |
| 168 | } |
| 169 | EXPORT_SYMBOL(__const_udelay); |
| 170 | |
| 171 | void __udelay(unsigned long usecs) |
| 172 | { |
| 173 | __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ |
| 174 | } |
| 175 | EXPORT_SYMBOL(__udelay); |
| 176 | |
| 177 | void __ndelay(unsigned long nsecs) |
| 178 | { |
| 179 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ |
| 180 | } |
| 181 | EXPORT_SYMBOL(__ndelay); |