#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/config.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"	lwz		%1,%3(13)	# __spin_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}

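/*
 * Roughly what the asm in __spin_trylock does, expressed as a C sketch
 * (illustration only; the real code relies on lwarx/stwcx. for atomicity
 * and on the isync as an acquire barrier):
 *
 *	token = get_paca()->lock_token;	// loaded via r13, the paca pointer
 *	old = lock->slock;
 *	if (old == 0)
 *		lock->slock = token;	// done atomically by lwarx/stwcx.
 *	return old;			// 0 means we now hold the lock
 */
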
static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_paca()->lppaca.shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif
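
/*
 * A minimal sketch of what a shared-processor yield helper can look like.
 * The real __spin_yield is defined elsewhere, not in this header; the
 * lppaca.yield_count field, the H_CONFER hypervisor call and its argument
 * list below are assumptions based on the interfaces this file includes.
 *
 *	void __spin_yield(raw_spinlock_t *lock)
 *	{
 *		unsigned int lock_value = lock->slock;
 *		unsigned int holder_cpu, yield_count;
 *
 *		if (lock_value == 0)
 *			return;			// lock was freed, just retry
 *		holder_cpu = lock_value & ~0x80000000;	// see comment above
 *		yield_count = paca[holder_cpu].lppaca.yield_count;
 *		if ((yield_count & 1) == 0)
 *			return;			// holder is running, keep spinning
 *		// ask the hypervisor to run the holder with the rest of
 *		// our timeslice (exact call and arguments are assumptions)
 *		plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
 *	}
 */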

static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();	/* drop SMT thread priority while we wait */
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();	/* restore normal thread priority */
	}
}

static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		/* let interrupts back in (to the caller's saved flags) while
		 * we busy-wait, then restore the disabled state before retrying */
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	/* lwsync orders all earlier loads and stores before the store
	 * below that releases the lock */
	__asm__ __volatile__("lwsync	# __raw_spin_unlock": : :"memory");
	lock->slock = 0;
}

extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

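/*
 * Illustration of the mixing described above (the lock and data names
 * are hypothetical; the generic read_lock/write_lock_irqsave wrappers
 * end up in the __raw_* routines below):
 *
 *	static DEFINE_RWLOCK(foo_lock);
 *
 *	// process-context writer: must be irq-safe
 *	write_lock_irqsave(&foo_lock, flags);
 *	... update foo ...
 *	write_unlock_irqrestore(&foo_lock, flags);
 *
 *	// interrupt-handler reader: a plain read lock is enough
 *	read_lock(&foo_lock);
 *	... read foo ...
 *	read_unlock(&foo_lock);
 */
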
#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

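/*
 * Sketch of the encoding behind these tests (inferred from the code
 * below): rw->lock holds 0 when free, a positive reader count while
 * read-held, and the (negative) 0x80000000 | cpu token while write-held,
 * so readers only need the word to be non-negative while a writer needs
 * it to be exactly 0.
 */
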
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1		# read_trylock\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, tmp2;

	__asm__ __volatile__(
"	lwz		%1,%3(13)	# write_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}
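
/*
 * Note that the write path stores the same paca lock_token as the
 * spinlock path (0x80000000 | cpu), so a write-held rwlock reads as a
 * negative value (which is what blocks readers above), and the holder
 * can be identified if __rw_yield needs to donate the timeslice.
 */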

static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
	rw->lock = 0;
}

#endif /* __ASM_SPINLOCK_H */