[SPARC64]: More fully work around Spitfire Errata 51.
include/asm-sparc64/spinlock.h
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
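
/* For rough context on the branch-range remark above: the classic
 * 32-bit Bicc branches take a 22-bit word displacement (on the order
 * of +/-8MB of reach), while the V9 BPcc and BPr (brz/brnz) forms
 * take 19-bit and 16-bit displacements (roughly +/-1MB and +/-128KB),
 * so the out-of-line spinner code has to stay within reach of the
 * shorter-range forms.
 */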

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	rmb();			\
} while((lp)->lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
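
/* A rough C-level sketch of what the asm above implements, using a
 * hypothetical test_and_set_byte() helper to stand in for ldstub:
 *
 *	static inline void spin_lock_sketch(spinlock_t *lp)
 *	{
 *		while (test_and_set_byte(&lp->lock))	// atomic ldstub
 *			while (lp->lock)		// read-only spin while held
 *				cpu_relax();
 *	}
 *
 * The membar right after the ldstub provides the ordering the lock
 * acquire needs under RMO, and the read-only inner loop avoids
 * hammering the lock byte with atomic operations while another cpu
 * holds it.
 */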

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
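
/* The _flags variant above only differs on the contended path: while
 * it spins it drops %pil back to the level passed in 'flags'
 * (presumably the pre-spin_lock_irqsave() interrupt level), so
 * interrupts can still be serviced during a long wait, and it raises
 * %pil again in the delay slot of the branch back to the ldstub retry.
 */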

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	rmb(); \
} while((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);

#define _raw_spin_trylock(lp)	\
	_do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
#define _raw_spin_lock(lock)	\
	_do_spin_lock(lock, "spin_lock", \
		      (unsigned long) __builtin_return_address(0))
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
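
/* A rough C-level sketch of the read-lock loop above, assuming a
 * hypothetical cas32() compare-and-swap helper in place of the cas
 * instruction.  A negative lock word means a writer holds the lock
 * (bit 31 set), so readers spin with plain loads until it goes
 * non-negative and then try to bump the reader count:
 *
 *	static inline void read_lock_sketch(rwlock_t *rw)
 *	{
 *		int old;
 *
 *		do {
 *			while ((old = (int) rw->lock) < 0)
 *				cpu_relax();
 *		} while (cas32(&rw->lock, old, old + 1) != old);
 *	}
 */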

static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);

#define _raw_read_lock(lock)	\
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock", \
		      (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock)	\
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock", \
			(unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock)	\
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock", \
		       (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock)	\
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock, \
			 (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock)	\
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock", \
				(unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)
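
/* The rwlock word encodes a writer in bit 31 (the 0x80000000 mask used
 * by __write_lock and __write_trylock) and the reader count in the
 * remaining bits, so read_can_lock() only has to check that the sign
 * bit is clear, while write_can_lock() needs the whole word to be zero.
 */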

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */