#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
21 | ||
96a388de | 22 | #ifdef CONFIG_X86_32 |
1075cf7a | 23 | # define LOCK_PTR_REG "a" |
74e91604 | 24 | # define REG_PTR_MODE "k" |
96a388de | 25 | #else |
1075cf7a | 26 | # define LOCK_PTR_REG "D" |
74e91604 | 27 | # define REG_PTR_MODE "q" |
1075cf7a TG |
28 | #endif |
29 | ||
3a556b26 NP |
30 | #if defined(CONFIG_X86_32) && \ |
31 | (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) | |
32 | /* | |
33 | * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock | |
34 | * (PPro errata 66, 92) | |
35 | */ | |
36 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX | |
37 | #else | |
38 | # define UNLOCK_LOCK_PREFIX | |
314cdbef NP |
39 | #endif |
40 | ||
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
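
/*
 * Illustrative sketch only (kept under #if 0, never compiled): the
 * ticket-lock algorithm described above, written in plain C using GCC
 * __sync builtins. The ticket_*_sketch names are ours, not part of
 * this header.
 */
#if 0
static void ticket_lock_sketch(unsigned short *slock)
{
	/* xadd: atomically bump the tail (high byte), fetch the old word */
	unsigned short old = __sync_fetch_and_add(slock, 0x0100);
	unsigned char ticket = old >> 8;	/* our place in the queue */

	/* spin until the head (low byte, x86 is little-endian) comes up */
	while (*(volatile unsigned char *)slock != ticket)
		;	/* cpu_relax() / "rep ; nop" would go here */
}

static void ticket_unlock_sketch(unsigned short *slock)
{
	/* release: advance the head so the next ticket holder proceeds */
	(*(volatile unsigned char *)slock)++;
}
#endif
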
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		/* take a ticket: %b0 = old head, %h0 = our ticket */
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     /* new = old word with the tail byte bumped by one */
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
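
/*
 * Illustrative sketch only (never compiled): the trylock logic above in
 * plain C. If head != tail the lock is held and we fail immediately;
 * otherwise we try to swap in the word with the tail bumped by one.
 * The name and the builtin-based style are ours, not the kernel's.
 */
#if 0
static int ticket_trylock_sketch(unsigned short *slock)
{
	unsigned short old = *slock;

	if ((unsigned char)old != (unsigned char)(old >> 8))
		return 0;	/* head != tail: somebody holds or waits */

	/* claim the ticket we just observed, if nothing changed meanwhile */
	return __sync_bool_compare_and_swap(slock, old,
					    (unsigned short)(old + 0x0100));
}
#endif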
98 | ||
74d4affd | 99 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) |
3a556b26 | 100 | { |
d3bf60a6 JP |
101 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" |
102 | : "+m" (lock->slock) | |
103 | : | |
104 | : "memory", "cc"); | |
3a556b26 | 105 | } |
1075cf7a | 106 | #else |
08f5fcbe | 107 | #define TICKET_SHIFT 16 |
3a556b26 | 108 | |
74d4affd | 109 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) |
3a556b26 NP |
110 | { |
111 | int inc = 0x00010000; | |
112 | int tmp; | |
113 | ||
5bbd4c37 | 114 | asm volatile(LOCK_PREFIX "xaddl %0, %1\n" |
d3bf60a6 JP |
115 | "movzwl %w0, %2\n\t" |
116 | "shrl $16, %0\n\t" | |
117 | "1:\t" | |
118 | "cmpl %0, %2\n\t" | |
119 | "je 2f\n\t" | |
120 | "rep ; nop\n\t" | |
121 | "movzwl %1, %2\n\t" | |
122 | /* don't need lfence here, because loads are in-order */ | |
123 | "jmp 1b\n" | |
124 | "2:" | |
ef1f3413 | 125 | : "+r" (inc), "+m" (lock->slock), "=&r" (tmp) |
d3bf60a6 JP |
126 | : |
127 | : "memory", "cc"); | |
3a556b26 NP |
128 | } |
129 | ||
74d4affd | 130 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) |
3a556b26 NP |
131 | { |
132 | int tmp; | |
133 | int new; | |
134 | ||
d3bf60a6 JP |
135 | asm volatile("movl %2,%0\n\t" |
136 | "movl %0,%1\n\t" | |
137 | "roll $16, %0\n\t" | |
138 | "cmpl %0,%1\n\t" | |
74e91604 | 139 | "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t" |
d3bf60a6 | 140 | "jne 1f\n\t" |
5bbd4c37 | 141 | LOCK_PREFIX "cmpxchgl %1,%2\n\t" |
d3bf60a6 JP |
142 | "1:" |
143 | "sete %b1\n\t" | |
144 | "movzbl %b1,%0\n\t" | |
ef1f3413 | 145 | : "=&a" (tmp), "=&q" (new), "+m" (lock->slock) |
d3bf60a6 JP |
146 | : |
147 | : "memory", "cc"); | |
3a556b26 NP |
148 | |
149 | return tmp; | |
150 | } | |
1075cf7a | 151 | |
74d4affd | 152 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) |
1075cf7a | 153 | { |
d3bf60a6 JP |
154 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" |
155 | : "+m" (lock->slock) | |
156 | : | |
157 | : "memory", "cc"); | |
1075cf7a | 158 | } |
3a556b26 | 159 | #endif |

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
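
/*
 * Worked example (ours, TICKET_SHIFT == 8): with head == 2 and tail == 3
 * the lock word is 0x0302, so ((0x0302 >> 8) ^ 0x0302) & 0xff == 0x01 and
 * the lock reads as locked. is_contended computes (tail - head) mod 256
 * and asks whether it exceeds 1, i.e. whether anyone is queued behind
 * the current owner.
 */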

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended	__raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}
216 | ||
217 | /* | |
218 | * Read-write spinlocks, allowing multiple readers | |
219 | * but only one writer. | |
220 | * | |
221 | * NOTE! it is quite common to have readers in interrupts | |
222 | * but no interrupt writers. For those circumstances we | |
223 | * can "mix" irq-safe locks - any writer needs to get a | |
224 | * irq-safe write-lock, but readers can get non-irqsafe | |
225 | * read-locks. | |
226 | * | |
227 | * On x86, we implement read-write locks as a 32-bit counter | |
228 | * with the high bit (sign) being the "contended" bit. | |
229 | */ | |
230 | ||
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
297 | ||
f5f7eac4 RH |
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | |
300 | ||
1075cf7a TG |
301 | #define _raw_spin_relax(lock) cpu_relax() |
302 | #define _raw_read_relax(lock) cpu_relax() | |
303 | #define _raw_write_relax(lock) cpu_relax() | |
304 | ||
ad462769 JO |
305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ |
306 | static inline void smp_mb__after_lock(void) { } | |
307 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | |
308 | ||
1965aae3 | 309 | #endif /* _ASM_X86_SPINLOCK_H */ |