#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>
/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"	/* CP15 data synchronization barrier */
		SEV
		: : "r" (0)
	);
#endif
}
/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"		/* load the owner/next word */
"	add	%1, %0, %4\n"		/* take a ticket: bump "next" */
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"	/* zero iff owner == next */
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}
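/*
 * Editorial sketch, not kernel code: the same ticket algorithm in
 * portable C11 atomics, mirroring the owner/next halfword pair that
 * arch_spinlock_t packs into one 32-bit word.  Names here are
 * hypothetical, seq_cst ordering stands in for the explicit smp_mb()
 * barriers, and the block is guarded out so it is never built.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t owner;		/* ticket currently being served */
	_Atomic uint16_t next;		/* next ticket to hand out */
} example_ticket_lock_t;

static void example_ticket_lock(example_ticket_lock_t *l)
{
	/* Take a ticket: the fetch-and-add mirrors the ldrex/add/strex
	 * loop in arch_spin_lock(). */
	uint16_t ticket = atomic_fetch_add(&l->next, 1);

	/* Spin until our number comes up; the real code sleeps in
	 * wfe() and is woken by dsb_sev() from the unlocker. */
	while (atomic_load(&l->owner) != ticket)
		;
}

static void example_ticket_unlock(example_ticket_lock_t *l)
{
	/* Only the holder writes "owner", so a plain increment hands
	 * the lock to the next ticket in FIFO order. */
	atomic_fetch_add(&l->owner, 1);
}
#endif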
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);

	/* Contended means at least one CPU is waiting beyond the holder. */
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
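/*
 * Editorial sketch, not kernel code: the write-lock protocol above in
 * C11 atomics.  Bit 31 of the word is the "held for write" flag; the
 * type and function names are hypothetical and the block is guarded
 * out so it is never built.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t lock;	/* 0 = free, bit 31 = write-held,
				 * 1..N = reader count */
} example_rwlock_t;

static int example_write_trylock(example_rwlock_t *rw)
{
	uint32_t expected = 0;

	/* Succeeds only when there are no readers and no writer,
	 * like the teq/strexeq pair in arch_write_trylock(). */
	return atomic_compare_exchange_strong(&rw->lock, &expected,
					      UINT32_C(0x80000000));
}

static void example_write_unlock(example_rwlock_t *rw)
{
	/* The writer holds the lock exclusively, so storing zero is
	 * enough - the same reasoning as the plain str above. */
	atomic_store(&rw->lock, 0);
}
#endif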
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"		/* result is mi if write-held */
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
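/*
 * Editorial sketch, not kernel code: the read-side counting above in
 * C11 atomics, self-contained with the same hypothetical layout as the
 * write-lock sketch earlier.  As in the asm, the incremented count is
 * only published while bit 31 is clear; a set bit 31 means a writer
 * holds the lock.  Guarded out so it is never built.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t lock;	/* 0 = free, bit 31 = write-held,
				 * 1..N = reader count */
} example_rwlock_t;

static int example_read_trylock(example_rwlock_t *rw)
{
	uint32_t old = atomic_load(&rw->lock);

	/* Mirror the "adds; strexpl" pair: publish old + 1 only while
	 * no writer is present (cf. contended < 0x80000000 above). */
	while (old < UINT32_C(0x80000000)) {
		if (atomic_compare_exchange_weak(&rw->lock, &old, old + 1))
			return 1;
	}
	return 0;
}

static void example_read_unlock(example_rwlock_t *rw)
{
	/* Multiple readers may unlock concurrently; the last one out
	 * leaves the count at zero. */
	atomic_fetch_sub(&rw->lock, 1);
}
#endif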
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */