#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

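/*
 * Ensure prior stores are visible before signalling: a barrier (CP15 DSB
 * on ARMv6, "dsb ishst" on ARMv7+) followed by SEV, which wakes any CPUs
 * sleeping in WFE so they re-examine the lock.
 */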
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb ishst\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
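
/*
 * Conceptually (ignoring the atomicity that the ldrex/strex sequences
 * below provide), taking the lock amounts to:
 *
 *	ticket = lock->tickets.next++;
 *	while (lock->tickets.owner != ticket)
 *		wfe();
 *
 * and releasing it to incrementing lock->tickets.owner and then calling
 * dsb_sev() to wake the waiters.  "ticket" is illustrative only.
 */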

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

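/* Take the next ticket and spin, sleeping in WFE, until we own the lock. */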
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

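/*
 * Single attempt: succeed only if the lock is currently free (the next
 * and owner tickets match); the loop below retries only when the strex
 * itself fails, never when the lock is held by someone else.
 */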
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

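/* Hand the lock to the next ticket holder, then wake any CPUs in WFE. */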
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

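/* Contended: at least one CPU is queued behind the current owner. */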
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
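
/*
 * Lock word encoding, as used by the accessors below: 0 means unlocked,
 * 0x80000000 means write-locked, and a positive value is the number of
 * active readers.  A negative value therefore always means "held for
 * write".
 */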

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

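/*
 * Single attempt to take the write lock: succeed only when the lock word
 * is zero (no readers, no writer); retry only if the strex fails.
 */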
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

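/*
 * Drop a read lock: atomically decrement the reader count and, if we
 * were the last reader (the count hit zero), wake any waiting writers.
 */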
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

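/*
 * Single attempt to take a read lock: bump the reader count unless a
 * writer already holds the lock; retry only if the strex fails.
 */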
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */