#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
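
/*
 * A rough C-level sketch of the scheme described above (illustrative
 * only; the real implementations below are in inline asm, and
 * fetch_and_add() is a hypothetical stand-in for the locked xadd,
 * which in the real code covers both bytes at once):
 *
 *	struct ticket { u8 head, tail; };	// low/high byte of slock
 *
 *	void lock(struct ticket *t)
 *	{
 *		u8 me = fetch_and_add(&t->tail, 1);	// take a ticket
 *		while (t->head != me)			// wait for our turn
 *			cpu_relax();
 *	}
 *
 *	void unlock(struct ticket *t)
 *	{
 *		t->head++;				// serve the next ticket
 *	}
 */
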
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
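
/*
 * In C terms the fast path above is roughly (a sketch; xadd() is a
 * hypothetical name for the locked xaddw, which atomically adds
 * 0x0100 and returns the old head/tail pair):
 *
 *	short old = xadd(&lock->slock, 0x0100);	// high byte: our ticket
 *	while (((old >> 8) & 0xff) != (old & 0xff))	// ticket != head
 *		old = (old & 0xff00) | head_byte(lock);	// reload head only
 */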

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"
		     "lock ; cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
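
/*
 * The trylock above in C terms (a sketch; cmpxchg() names the locked
 * cmpxchgw): succeed only if the lock looks free (head == tail) and a
 * single compare-and-exchange manages to bump the tail first:
 *
 *	short old = lock->slock;
 *	if (((old >> 8) & 0xff) != (old & 0xff))
 *		return 0;			// currently owned
 *	return cmpxchg(&lock->slock, old, old + 0x0100) == old;
 */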

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile("lock ; xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"
		     "lock ; cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#ifdef CONFIG_PARAVIRT
/*
 * Define a virtualization-friendly old-style lock-byte lock, for use
 * in pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    "   test %1,%1\n"
	    "   jz 3f\n"
	    "   " LOCK_PREFIX "incb %2\n"
	    "2: rep;nop\n"
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}
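
/*
 * The same loop in C terms (a sketch; xchg() stands for the atomic
 * xchgb above):
 *
 *	while (xchg(&bl->lock, 1) != 0) {	// try to grab the byte
 *		atomic_inc(&bl->spinners);	// advertise contention
 *		while (bl->lock == 1)
 *			cpu_relax();
 *		atomic_dec(&bl->spinners);	// back off, then retry
 *	}
 */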

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
#endif /* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

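/*
 * A worked example of the biased counter (illustrative, assuming
 * RW_LOCK_BIAS is 0x01000000 as defined in asm/rwlock.h):
 *
 *	unlocked:	count == RW_LOCK_BIAS
 *	N readers:	count == RW_LOCK_BIAS - N	(still positive)
 *	one writer:	count == 0	(subtracted the whole bias)
 *	contended:	count goes negative, the sign bit is set, and
 *			the losing party backs out and spins
 */
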
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
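
/*
 * __read_lock_failed and __write_lock_failed are out-of-line asm
 * slow paths, kept off the fast path above.  Roughly, in C (an
 * illustrative sketch of the read side, not their actual code):
 *
 *	for (;;) {
 *		atomic_inc(count);		// undo our decrement
 *		while (atomic_read(count) <= 0)
 *			cpu_relax();		// wait for the writer
 *		if (!atomic_add_negative(-1, count))
 *			break;			// got the read lock
 *	}
 *
 * The write side does the same dance with RW_LOCK_BIAS instead of 1.
 */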

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _X86_SPINLOCK_H_ */