#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

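/*
 * dsb_sev() below makes sure any update to the lock word is visible to
 * other CPUs before SEV wakes them from the WFE issued while spinning.
 * ARMv7 has a native DSB instruction; ARMv6 uses the equivalent CP15
 * barrier operation instead.
 */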
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb ishst\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
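/*
 * Note on the lock layout: the lock word packs two 16-bit tickets.  The
 * low half is "owner" (the ticket currently being served) and the high
 * half is "next" (the next ticket to hand out); TICKET_SHIFT (16, from
 * spinlock_types.h) selects the "next" half.  For example, a value of
 * 0x00030002 means next = 3 and owner = 2: the lock is held and one
 * other CPU is already queued behind the holder.
 */
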
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

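/*
 * arch_spin_lock(): atomically take a ticket (load the lock word, add
 * 1 << TICKET_SHIFT to "next", store it back with strex, retrying if the
 * exclusive store fails), then wait in WFE until "owner" reaches the
 * ticket we drew.
 */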
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

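/*
 * arch_spin_trylock(): succeeds only when the lock is currently free.
 * "subs %1, %0, %0, ror #16" compares the two ticket halves (rotating
 * the word by 16 bits swaps them), so the result is zero exactly when
 * next == owner; only then is the incremented ticket stored back.
 */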
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

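/*
 * arch_spin_unlock(): the holder simply advances "owner" to serve the
 * next ticket; the barrier before the increment orders the critical
 * section, and dsb_sev() wakes any CPUs waiting in WFE.
 */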
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

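/*
 * arch_spin_is_contended(): the lock counts as contended only when more
 * than one ticket separates "next" from "owner", i.e. at least one CPU
 * is queued behind the current holder.
 */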
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
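/*
 * Note on the rw-lock word: 0 means unlocked, 0x80000000 (bit 31) means
 * held for write, and any small positive value is the number of active
 * readers.  A negative value therefore always means a writer owns it.
 */
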
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
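/*
 * arch_read_lock(): "adds" sets the N flag when the incremented value is
 * negative (a writer holds the lock), so "strexpl" only stores when it
 * is not.  "rsbpls" then folds a failed strex into a negative value as
 * well, and "bmi" loops back in either case after a conditional WFE.
 */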
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

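/*
 * arch_read_unlock(): drop our reader count.  Only the last reader
 * (result 0) issues dsb_sev(), since that is the only transition a
 * waiting writer can act on.
 */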
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

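/*
 * arch_read_trylock(): one strexpl attempt per loop, retried only on a
 * spurious strex failure; it succeeds unless bit 31 is set, i.e. unless
 * a writer already holds the lock.
 */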
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */