#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero the lock is free,
 * so we try to exclusively store 1 back; if that store fails, another
 * CPU raced us and we retry.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the current lock value */
"	teq	%0, #0\n"		/* already held? */
"	strexeq	%0, %2, [%1]\n"		/* if free, try to exclusively store 1 */
"	teqeq	%0, #0\n"		/* did the exclusive store succeed? */
"	bne	1b"			/* held, or store failed: retry */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");
}

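/*
 * Illustrative only: a minimal C model of the acquire loop above, with
 * the GCC __sync compare-and-swap builtin standing in for the
 * ldrex/strex pair.  The helper name is hypothetical and it is not part
 * of the kernel API; it merely restates what the assembly does.
 * _raw_spin_trylock() below is the same sequence without the retry
 * branch: a single attempt whose success is returned to the caller.
 */
static inline void __spin_lock_c_model(spinlock_t *lock)
{
	/* Spin until we observe 0 and manage to swap in 1 atomically. */
	while (!__sync_bool_compare_and_swap(&lock->lock, 0, 1))
		/* another CPU holds the lock, or our store raced */;
}
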
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"		/* exclusively load the current lock value */
"	teq	%0, #0\n"		/* already held? */
"	strexeq	%0, %2, [%1]"		/* if free, try to exclusively store 1 */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");

	return tmp == 0;
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	str	%1, [%0]"		/* the owner simply stores 0 to release */
	:
	: "r" (&lock->lock), "r" (0)
	: "cc", "memory");
}

/*
 * RWLOCKS
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"		/* any readers or a writer present? */
"	strexeq	%0, %2, [%1]\n"		/* if idle, try to store bit 31 */
"	teq	%0, #0\n"		/* store succeeded? */
"	bne	1b"			/* otherwise retry */
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");
}

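/*
 * Illustrative only: the write-lock loop above, modelled in C.  A writer
 * may only take the lock when the whole word is zero (no readers, no
 * writer), and marks ownership by setting bit 31.  Hypothetical helper,
 * not part of this header's API.
 */
static inline void __write_lock_c_model(rwlock_t *rw)
{
	/* Spin until the lock word is 0, then claim it by writing bit 31. */
	while (!__sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000))
		/* readers or a writer still hold the lock */;
}
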
static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load the lock value */
"	teq	%0, #0\n"		/* any readers or a writer present? */
"	strexeq	%0, %2, [%1]"		/* if idle, try to store bit 31 */
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");

	return tmp == 0;
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
	"str	%1, [%0]"		/* exclusively held, so a plain store of 0 releases it */
	:
	: "r" (&rw->lock), "r" (0)
	: "cc", "memory");
}

/*
 * Read locks are a bit more hairy:
 * - Exclusively load the lock value.
 * - Increment it.
 * - Store new lock value if positive, and we still own this location.
 *   If the value is negative, we've already failed.
 * - If we failed to store the value, we want a negative result.
 * - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* exclusively load the lock value */
"	adds	%0, %0, #1\n"		/* bump the reader count; N set if a writer holds it */
"	strexpl	%1, %0, [%2]\n"		/* store back only if the result is non-negative */
"	rsbpls	%0, %1, #0\n"		/* turn a failed exclusive store into a negative result */
"	bmi	1b"			/* negative: writer held it or store failed, retry */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}

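/*
 * Illustrative only: a C model of the read-lock loop above.  The reader
 * count lives in the low bits; bit 31 (a negative value when the word is
 * viewed as signed) means a writer owns the lock, so the incremented
 * count is only published while the result stays non-negative.
 * Hypothetical helper, not part of this header's API.
 */
static inline void __read_lock_c_model(rwlock_t *rw)
{
	unsigned int old, newval;

	do {
		old = rw->lock;		/* re-read the current value each try */
		newval = old + 1;	/* one more reader */
	} while ((int)newval < 0 ||	/* writer present: keep spinning */
		 !__sync_bool_compare_and_swap(&rw->lock, old, newval));
}
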
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* exclusively load the reader count */
"	sub	%0, %0, #1\n"		/* one less reader */
"	strex	%1, %0, [%2]\n"		/* try to store it back */
"	teq	%1, #0\n"		/* did the exclusive store fail? */
"	bne	1b"			/* retry until it succeeds */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}

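/*
 * Illustrative only: read unlock is conceptually just an atomic decrement
 * of the reader count; several readers may be active but no writer can
 * be.  Hypothetical helper, not part of this header's API.
 */
static inline void __read_unlock_c_model(rwlock_t *rw)
{
	(void)__sync_fetch_and_sub(&rw->lock, 1);	/* drop one reader */
}
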
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */