Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1999, 2000 by Ralf Baechle | |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
8 | */ | |
9 | #ifndef _ASM_SPINLOCK_H | |
10 | #define _ASM_SPINLOCK_H | |
11 | ||
12 | #include <linux/config.h> | |
13 | #include <asm/war.h> | |
14 | ||
15 | /* | |
16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | |
17 | */ | |
18 | ||
fb1c8f93 IM |
/*
 * Lock-state helpers: a spinlock is held iff ->lock is non-zero.
 * This arch ignores the flags argument, so lock_flags just forwards
 * to __raw_spin_lock.  unlock_wait busy-polls (with cpu_relax) until
 * the lock is observed free; it does NOT acquire the lock.
 */
19 | #define __raw_spin_is_locked(x) ((x)->lock != 0)
20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | |
21 | #define __raw_spin_unlock_wait(x) \ | |
22 | do { cpu_relax(); } while ((x)->lock) | |
1da177e4 LT |
23 | |
24 | /* | |
25 | * Simple spin lock operations. There are two variants, one clears IRQ's | |
26 | * on the local processor, one does not. | |
27 | * | |
28 | * We make no fairness assumptions. They have a cost. | |
29 | */ | |
30 | ||
/*
 * Acquire the spinlock.  ll/sc retry loop: reload while ->lock is
 * non-zero (bnez back to 1:), attempt to store 1 with sc, and retry
 * if the sc fails; the trailing "sync" orders the critical section
 * after the acquire.  Note the "li %1, 1" sits in the bnez delay slot
 * (.set noreorder), so it executes on every iteration.
 *
 * The R10000_LLSC_WAR variant differs only in using the branch-likely
 * "beqzl" (with nop in its slot) for the sc retry — presumably the
 * R10000 ll/sc errata workaround from <asm/war.h>; confirm there.
 * "memory" clobber makes this a compiler barrier as well.
 */
fb1c8f93 | 31 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
1da177e4 LT |
32 | { |
33 | unsigned int tmp; | |
34 | ||
35 | if (R10000_LLSC_WAR) { | |
36 | __asm__ __volatile__( | |
fb1c8f93 | 37 | " .set noreorder # __raw_spin_lock \n" |
1da177e4 LT |
38 | "1: ll %1, %2 \n"
39 | " bnez %1, 1b \n" | |
40 | " li %1, 1 \n" | |
41 | " sc %1, %0 \n" | |
42 | " beqzl %1, 1b \n" | |
43 | " nop \n" | |
44 | " sync \n" | |
45 | " .set reorder \n" | |
46 | : "=m" (lock->lock), "=&r" (tmp) | |
47 | : "m" (lock->lock) | |
48 | : "memory"); | |
49 | } else { | |
50 | __asm__ __volatile__( | |
fb1c8f93 | 51 | " .set noreorder # __raw_spin_lock \n" |
1da177e4 LT |
52 | "1: ll %1, %2 \n"
53 | " bnez %1, 1b \n" | |
54 | " li %1, 1 \n" | |
55 | " sc %1, %0 \n" | |
56 | " beqz %1, 1b \n" | |
57 | " sync \n" | |
58 | " .set reorder \n" | |
59 | : "=m" (lock->lock), "=&r" (tmp) | |
60 | : "m" (lock->lock) | |
61 | : "memory"); | |
62 | } | |
63 | } | |
64 | ||
/*
 * Release the spinlock: "sync" orders all critical-section accesses
 * before the plain store of $0 (zero) that frees the lock.  No ll/sc
 * needed — only the holder writes here.  (The "\t" inside the ".set
 * reorder" string is just assembler whitespace, not a typo.)
 */
fb1c8f93 | 65 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
1da177e4 LT |
66 | { |
67 | __asm__ __volatile__( | |
fb1c8f93 | 68 | " .set noreorder # __raw_spin_unlock \n" |
1da177e4 LT |
69 | " sync \n"
70 | " sw $0, %0 \n" | |
71 | " .set\treorder \n" | |
72 | : "=m" (lock->lock) | |
73 | : "m" (lock->lock) | |
74 | : "memory"); | |
75 | } | |
76 | ||
/*
 * Non-blocking acquire.  Loads the current word (ll), unconditionally
 * sets bit 0 via ori and attempts the sc; on sc success, "andi"
 * extracts the *previous* bit 0 into res.  Returns res == 0, i.e.
 * non-zero (success) iff the lock was free when we grabbed it.
 * The loop only retries on sc failure — a held lock falls straight
 * through and reports failure rather than spinning.  "sync" provides
 * acquire ordering even on the failure path.
 */
fb1c8f93 | 77 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) |
1da177e4 LT |
78 | { |
79 | unsigned int temp, res; | |
80 | ||
81 | if (R10000_LLSC_WAR) { | |
82 | __asm__ __volatile__( | |
fb1c8f93 | 83 | " .set noreorder # __raw_spin_trylock \n" |
1da177e4 LT |
84 | "1: ll %0, %3 \n"
85 | " ori %2, %0, 1 \n" | |
86 | " sc %2, %1 \n" | |
87 | " beqzl %2, 1b \n" | |
88 | " nop \n" | |
89 | " andi %2, %0, 1 \n" | |
90 | " sync \n" | |
91 | " .set reorder" | |
92 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) | |
93 | : "m" (lock->lock) | |
94 | : "memory"); | |
95 | } else { | |
96 | __asm__ __volatile__( | |
fb1c8f93 | 97 | " .set noreorder # __raw_spin_trylock \n" |
1da177e4 LT |
98 | "1: ll %0, %3 \n"
99 | " ori %2, %0, 1 \n" | |
100 | " sc %2, %1 \n" | |
101 | " beqz %2, 1b \n" | |
102 | " andi %2, %0, 1 \n" | |
103 | " sync \n" | |
104 | " .set reorder" | |
105 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) | |
106 | : "m" (lock->lock) | |
107 | : "memory"); | |
108 | } | |
109 | ||
110 | return res == 0; | |
111 | } | |
112 | ||
113 | /* | |
114 | * Read-write spinlocks, allowing multiple readers but only one writer. | |
115 | * | |
116 | * NOTE! it is quite common to have readers in interrupts but no interrupt | |
117 | * writers. For those circumstances we can "mix" irq-safe locks - any writer | |
118 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe | |
119 | * read-locks. | |
120 | */ | |
121 | ||
/*
 * Acquire for reading.  The rwlock word counts readers; a writer sets
 * the sign bit (see __raw_write_lock's lui 0x8000).  So: spin while
 * the value is negative (bltz — writer present), otherwise increment
 * the reader count with addu in the branch delay slot and commit via
 * sc, retrying on sc failure.  "sync" gives acquire ordering.  The
 * WAR variant again only swaps beqz for the branch-likely beqzl+nop.
 */
fb1c8f93 | 122 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
1da177e4 LT |
123 | { |
124 | unsigned int tmp; | |
125 | ||
126 | if (R10000_LLSC_WAR) { | |
127 | __asm__ __volatile__( | |
fb1c8f93 | 128 | " .set noreorder # __raw_read_lock \n" |
1da177e4 LT |
129 | "1: ll %1, %2 \n"
130 | " bltz %1, 1b \n" | |
131 | " addu %1, 1 \n" | |
132 | " sc %1, %0 \n" | |
133 | " beqzl %1, 1b \n" | |
134 | " nop \n" | |
135 | " sync \n" | |
136 | " .set reorder \n" | |
137 | : "=m" (rw->lock), "=&r" (tmp) | |
138 | : "m" (rw->lock) | |
139 | : "memory"); | |
140 | } else { | |
141 | __asm__ __volatile__( | |
fb1c8f93 | 142 | " .set noreorder # __raw_read_lock \n" |
1da177e4 LT |
143 | "1: ll %1, %2 \n"
144 | " bltz %1, 1b \n" | |
145 | " addu %1, 1 \n" | |
146 | " sc %1, %0 \n" | |
147 | " beqz %1, 1b \n" | |
148 | " sync \n" | |
149 | " .set reorder \n" | |
150 | : "=m" (rw->lock), "=&r" (tmp) | |
151 | : "m" (rw->lock) | |
152 | : "memory"); | |
153 | } | |
154 | } | |
155 | ||
156 | /* Note the use of sub, not subu which will make the kernel die with an | |
157 | overflow exception if we ever try to unlock an rwlock that is already | |
158 | unlocked or is being held by a writer. */ | |
/*
 * Drop a read lock: ll/sc decrement of the reader count.  Per the
 * comment above, "sub" (not subu) is deliberate so that unlocking an
 * already-unlocked or writer-held lock traps with an overflow
 * exception instead of silently corrupting state.  "sync" sits in the
 * retry-branch delay slot, so it also executes once on the success
 * path, ordering the critical section before the release.
 */
fb1c8f93 | 159 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
1da177e4 LT |
160 | { |
161 | unsigned int tmp; | |
162 | ||
163 | if (R10000_LLSC_WAR) { | |
164 | __asm__ __volatile__( | |
fb1c8f93 | 165 | "1: ll %1, %2 # __raw_read_unlock \n" |
1da177e4 LT |
166 | " sub %1, 1 \n"
167 | " sc %1, %0 \n" | |
168 | " beqzl %1, 1b \n" | |
169 | " sync \n" | |
170 | : "=m" (rw->lock), "=&r" (tmp) | |
171 | : "m" (rw->lock) | |
172 | : "memory"); | |
173 | } else { | |
174 | __asm__ __volatile__( | |
fb1c8f93 | 175 | " .set noreorder # __raw_read_unlock \n" |
1da177e4 LT |
176 | "1: ll %1, %2 \n"
177 | " sub %1, 1 \n" | |
178 | " sc %1, %0 \n" | |
179 | " beqz %1, 1b \n" | |
180 | " sync \n" | |
181 | " .set reorder \n" | |
182 | : "=m" (rw->lock), "=&r" (tmp) | |
183 | : "m" (rw->lock) | |
184 | : "memory"); | |
185 | } | |
186 | } | |
187 | ||
/*
 * Acquire for writing.  Spins (bnez) until the lock word is exactly
 * zero — no readers, no writer — then stores 0x80000000 ("lui %1,
 * 0x8000" loads the sign bit, executed in the branch delay slot) via
 * sc, retrying on sc failure.  That sign bit is what __raw_read_lock's
 * bltz tests for.  "sync" provides acquire ordering.  Only the
 * beqzl/beqz retry branch differs between the WAR and plain variants.
 */
fb1c8f93 | 188 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
1da177e4 LT |
189 | { |
190 | unsigned int tmp; | |
191 | ||
192 | if (R10000_LLSC_WAR) { | |
193 | __asm__ __volatile__( | |
fb1c8f93 | 194 | " .set noreorder # __raw_write_lock \n" |
1da177e4 LT |
195 | "1: ll %1, %2 \n"
196 | " bnez %1, 1b \n" | |
197 | " lui %1, 0x8000 \n" | |
198 | " sc %1, %0 \n" | |
199 | " beqzl %1, 1b \n" | |
200 | " nop \n" | |
201 | " sync \n" | |
202 | " .set reorder \n" | |
203 | : "=m" (rw->lock), "=&r" (tmp) | |
204 | : "m" (rw->lock) | |
205 | : "memory"); | |
206 | } else { | |
207 | __asm__ __volatile__( | |
fb1c8f93 | 208 | " .set noreorder # __raw_write_lock \n" |
1da177e4 LT |
209 | "1: ll %1, %2 \n"
210 | " bnez %1, 1b \n" | |
211 | " lui %1, 0x8000 \n" | |
212 | " sc %1, %0 \n" | |
213 | " beqz %1, 1b \n" | |
214 | " nop \n" | |
215 | " sync \n" | |
216 | " .set reorder \n" | |
217 | : "=m" (rw->lock), "=&r" (tmp) | |
218 | : "m" (rw->lock) | |
219 | : "memory"); | |
220 | } | |
221 | } | |
222 | ||
/*
 * Release the write lock: "sync" orders the critical section, then a
 * plain store of $0 clears both the writer bit and (necessarily zero)
 * reader count.  No ll/sc needed — only the writer can reach here.
 */
fb1c8f93 | 223 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
1da177e4 LT |
224 | { |
225 | __asm__ __volatile__( | |
fb1c8f93 | 226 | " sync # __raw_write_unlock \n" |
1da177e4 LT |
227 | " sw $0, %0 \n"
228 | : "=m" (rw->lock) | |
229 | : "m" (rw->lock) | |
230 | : "memory"); | |
231 | } | |
232 | ||
/* No asm fast path for reader trylock — defer to the generic helper. */
fb1c8f93 | 233 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
1da177e4 | 234 | |
/*
 * Non-blocking write acquire.  ret is preset to 0 (failure); if the
 * lock word is non-zero (readers or a writer) "bnez %1, 2f" bails out
 * immediately to label 2 with ret still 0.  Otherwise the sign bit
 * (lui 0x8000 in the delay slot) is committed with sc, retrying only
 * on sc failure; on success "sync" orders the acquire and ret is set
 * to 1.  Returns 1 on acquisition, 0 if the lock was contended.
 * NOTE(review): the failure path (2:) skips the sync — harmless since
 * nothing was acquired, but worth confirming against later versions.
 */
fb1c8f93 | 235 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
1da177e4 LT |
236 | { |
237 | unsigned int tmp; | |
238 | int ret; | |
239 | ||
240 | if (R10000_LLSC_WAR) { | |
241 | __asm__ __volatile__( | |
fb1c8f93 | 242 | " .set noreorder # __raw_write_trylock \n" |
1da177e4 LT |
243 | " li %2, 0 \n"
244 | "1: ll %1, %3 \n" | |
245 | " bnez %1, 2f \n" | |
246 | " lui %1, 0x8000 \n" | |
247 | " sc %1, %0 \n" | |
248 | " beqzl %1, 1b \n" | |
249 | " nop \n" | |
250 | " sync \n" | |
251 | " li %2, 1 \n" | |
252 | " .set reorder \n" | |
253 | "2: \n" | |
254 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) | |
255 | : "m" (rw->lock) | |
256 | : "memory"); | |
257 | } else { | |
258 | __asm__ __volatile__( | |
fb1c8f93 | 259 | " .set noreorder # __raw_write_trylock \n" |
1da177e4 LT |
260 | " li %2, 0 \n"
261 | "1: ll %1, %3 \n" | |
262 | " bnez %1, 2f \n" | |
263 | " lui %1, 0x8000 \n" | |
264 | " sc %1, %0 \n" | |
265 | " beqz %1, 1b \n" | |
266 | " sync \n" | |
267 | " li %2, 1 \n" | |
268 | " .set reorder \n" | |
269 | "2: \n" | |
270 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) | |
271 | : "m" (rw->lock) | |
272 | : "memory"); | |
273 | } | |
274 | ||
275 | return ret; | |
276 | } | |
277 | ||
278 | #endif /* _ASM_SPINLOCK_H */ |