#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
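
/*
 * Editor's note (illustration, not in the original header): the u32
 * read above deliberately spans the adjacent u16 lock_token (the
 * constant 0x8000) and u16 paca_index fields of the paca, so on
 * either endianness the token value works out to 0x80000000 | cpu,
 * e.g. 0x80000005 while CPU 5 holds the lock.  On 32-bit kernels
 * every CPU writes the same token, 1, so the holder cannot be
 * identified (and need not be, since there is no hypervisor yield).
 */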

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
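
/*
 * Editor's note (background, hedged): paca->io_sync is set by the
 * MMIO accessors after a store to device memory.  SYNC_IO in
 * arch_spin_unlock() then issues a full barrier so that MMIO stores
 * performed inside the critical section are ordered before the lock
 * release; CLEAR_IO_SYNC resets the flag when the lock is taken.
 */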

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
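
/*
 * Editor's sketch (not part of the original header): ignoring the
 * reservation that makes this atomic, the asm above behaves like
 *
 *	tmp = lock->slock;		// lwarx: load and reserve
 *	if (tmp == 0)
 *		lock->slock = token;	// stwcx.: store iff reservation intact
 *	return tmp;			// 0 => lock acquired
 *
 * stwcx. clears cr0.eq on failure, so "bne- 1b" retries if another
 * CPU touched the word after the lwarx.  The trailing ",1" in
 * PPC_LWARX sets the EH hint, marking this load as part of a lock
 * acquisition, and PPC_ACQUIRE_BARRIER keeps the critical section
 * from being reordered before the lock is seen held.
 */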

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
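
/*
 * Editor's note (hedged): __spin_yield() and __rw_yield() live in
 * arch/powerpc/lib/locks.c.  They recover the holder's CPU number
 * from the lock token described above and, if that virtual CPU is
 * not currently running, confer the rest of this CPU's timeslice to
 * it via an H_CONFER hypercall.  On non-shared-LPAR builds they
 * degrade to a plain compiler barrier().
 */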

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
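
/*
 * Editor's note (hedged): HMT_low()/HMT_medium() adjust the SMT
 * hardware-thread priority, so a CPU spinning in the inner loop
 * yields execution resources to its sibling threads and takes them
 * back once the lock looks free.  The inner loop only reads
 * lock->slock; the stwcx. in __arch_spin_trylock is retried only
 * after the lock has been observed clear, which keeps reservation
 * traffic down while the lock is held.
 */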

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
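
/*
 * Editor's note (hedged): the flags variant re-enables interrupts
 * while it spins, if the caller's saved flags permit it.  It
 * restores the caller's pre-lock state (flags) for the duration of
 * the wait, then re-disables (flags_dis) before the next trylock
 * attempt, bounding interrupt latency under contention.
 */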

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
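
/*
 * Editor's note (hedged): unlock needs no atomic sequence; only the
 * owner can write here, so a release barrier (lwsync, or sync on
 * processors without it) followed by a plain store of 0 is enough to
 * publish the critical section before the lock appears free.
 */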

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)		(!(rw)->lock)
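
/*
 * Editor's note (hedged): rw->lock encodes the state in its sign:
 * 0 is unlocked, a positive value is the count of active readers,
 * and a negative value (WRLOCK_TOKEN below) means a writer holds it,
 * which is exactly what the two can_lock tests above check.
 */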

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
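
/*
 * Editor's sketch (not part of the original header): the atomic
 * sequence above is roughly
 *
 *	tmp = (int)rw->lock + 1;	// sign-extended on 64-bit
 *	if (tmp > 0)			// no writer (old value >= 0)
 *		rw->lock = tmp;		// register one more reader
 *	return tmp;			// > 0 => read lock taken
 *
 * The sign extension matters on 64-bit: the 32-bit lock word must
 * compare negative when a writer's token is present.  "xer" is
 * clobbered because addic. updates the carry bit.
 */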

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}
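
/*
 * Editor's sketch (not part of the original header): same pattern as
 * the spinlock fast path, but against the rwlock word:
 *
 *	tmp = rw->lock;
 *	if (tmp == 0)			// no readers, no writer
 *		rw->lock = token;	// negative WRLOCK_TOKEN
 *	return tmp;			// 0 => write lock taken
 */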

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
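
/*
 * Editor's note (hedged): read_unlock must use a full lwarx/stwcx.
 * loop, unlike the plain store in write_unlock below, because other
 * readers may be incrementing or decrementing the count concurrently.
 * The release barrier up front still orders the critical section
 * before the decrement becomes visible.
 */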

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */