Commit | Line | Data
---|---|---
1da177e4 LT |
1 | #ifndef __ASM_SPINLOCK_H
2 | #define __ASM_SPINLOCK_H
3 |
4 | #if __LINUX_ARM_ARCH__ < 6
5 | #error SMP not supported on pre-ARMv6 CPUs
6 | #endif
7 |
8 | /*
9 | * ARMv6 Spin-locking.
10 | *
6d9b37a3 RK |
11 | * We exclusively read the old value. If it is zero, we may have
12 | * won the lock, so we try exclusively storing it. A memory barrier
13 | * is required after we get a lock, and before we release it, because
14 | * V6 CPUs are assumed to have weakly ordered memory.
1da177e4 LT |
15 | *
16 | * Unlocked value: 0
17 | * Locked value: 1
18 | */
1da177e4 | 19 | |
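The protocol described above (exclusive read, conditional exclusive store, barrier on acquire) is what the asm blocks below implement with ldrex/strex. As a rough C-level illustration only, not the kernel's code, the same acquire semantics can be sketched with GCC's __atomic builtins; the arm_lock_t type and sketch_* names are hypothetical:

    /* Hypothetical sketch: acquire loop equivalent to ldrex/strex + smp_mb(). */
    typedef struct { volatile unsigned int lock; } arm_lock_t;

    static void sketch_lock(arm_lock_t *l)
    {
            unsigned int expected;
            do {
                    expected = 0;   /* only an unlocked (0) word may be claimed */
            } while (!__atomic_compare_exchange_n(&l->lock, &expected, 1,
                                    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    }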
fb1c8f93 IM |
20 | #define __raw_spin_is_locked(x) ((x)->lock != 0)
21 | #define __raw_spin_unlock_wait(lock) \
22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
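__raw_spin_unlock_wait spins until the current holder, if any, releases the lock, without ever acquiring it. A hypothetical caller, assuming it has already cut off new lockers:

    /* Hypothetical: quiesce before teardown; caller guarantees no new lockers. */
    static void sketch_quiesce(raw_spinlock_t *lock)
    {
            __raw_spin_unlock_wait(lock);   /* busy-waits while lock->lock != 0 */
    }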
1da177e4 | 23 | |
fb1c8f93 | 24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
1da177e4 | 25 | |
fb1c8f93 | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
1da177e4 LT |
27 | {
28 | unsigned long tmp;
29 |
30 | __asm__ __volatile__(
31 | "1: ldrex %0, [%1]\n"
32 | " teq %0, #0\n"
33 | " strexeq %0, %2, [%1]\n"
34 | " teqeq %0, #0\n"
35 | " bne 1b"
36 | : "=&r" (tmp)
37 | : "r" (&lock->lock), "r" (1)
6d9b37a3 RK |
38 | : "cc");
39 |
40 | smp_mb();
1da177e4 LT |
41 | }
42 |
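One subtlety in the loop above: %0 (tmp) is reused, first for the loaded lock value and then for the strex status. A commented copy of the asm body, for reading only, not part of the header:

    "1:     ldrex   %0, [%1]\n"     /* tmp = lock->lock, take exclusive monitor */
    "       teq     %0, #0\n"       /* already held by someone?                 */
    "       strexeq %0, %2, [%1]\n" /* if free, try to store 1; tmp = status    */
    "       teqeq   %0, #0\n"       /* if we tried, did the store succeed?      */
    "       bne     1b"             /* held, or reservation lost: spin again    */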
fb1c8f93 | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
1da177e4 LT |
44 | {
45 | unsigned long tmp;
46 |
47 | __asm__ __volatile__(
48 | " ldrex %0, [%1]\n"
49 | " teq %0, #0\n"
50 | " strexeq %0, %2, [%1]"
51 | : "=&r" (tmp)
52 | : "r" (&lock->lock), "r" (1)
6d9b37a3 RK |
53 | : "cc");
54 |
55 | if (tmp == 0) {
56 | smp_mb();
57 | return 1;
58 | } else {
59 | return 0;
60 | }
1da177e4 LT |
61 | }
62 |
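Unlike __raw_spin_lock, the trylock variant makes a single attempt: tmp ends up 0 only if the lock was free and our strexeq succeeded. A hypothetical caller that falls back instead of spinning:

    /* Hypothetical fast path: do the work only if the lock is uncontended. */
    static int sketch_try_fast_path(raw_spinlock_t *lock)
    {
            if (!__raw_spin_trylock(lock))
                    return 0;       /* contended: caller takes its slow path */
            /* ... critical section ... */
            __raw_spin_unlock(lock);
            return 1;
    }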
fb1c8f93 | 63 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
1da177e4 | 64 | { |
6d9b37a3 RK |
65 | smp_mb();
66 |
1da177e4 LT |
67 | __asm__ __volatile__(
68 | " str %1, [%0]"
69 | :
70 | : "r" (&lock->lock), "r" (0)
6d9b37a3 | 71 | : "cc"); |
1da177e4 LT |
72 | }
73 |
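Note the mirror-image barrier placement: the acquire paths issue smp_mb() after taking the lock, while release issues it before the plain store, so critical-section accesses cannot drift past the unlocking write. The same C-level sketch as before, hypothetical names, reusing arm_lock_t from above:

    /* Hypothetical sketch: release = barrier, then publish the unlocked word. */
    static void sketch_unlock(arm_lock_t *l)
    {
            __atomic_store_n(&l->lock, 0, __ATOMIC_RELEASE);
    }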
74 | /*
75 | * RWLOCKS
fb1c8f93 IM |
76 | *
77 | *
1da177e4 LT |
78 | * Write locks are easy - we just set bit 31. When unlocking, we can
79 | * just write zero since the lock is exclusively held.
80 | */
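Using bit 31 for the writer means the whole word can be tested by sign: write-locked is negative, N readers make the positive count N, and unlocked is zero. That is exactly what the adds/bmi pair in __raw_read_lock below exploits. Hypothetical helpers illustrating the encoding, not part of the header:

    #define SKETCH_RW_WRITER 0x80000000u        /* bit 31: exclusive writer */

    static int sketch_is_write_locked(unsigned int w)
    {
            return (int)w < 0;                  /* sign bit set => writer holds it */
    }

    static int sketch_reader_count(unsigned int w)
    {
            return (int)w < 0 ? 0 : (int)w;     /* otherwise the word is the count */
    }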
fb1c8f93 IM |
81 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
82 |
83 | static inline void __raw_write_lock(raw_rwlock_t *rw)
1da177e4 LT |
84 | {
85 | unsigned long tmp;
86 |
87 | __asm__ __volatile__(
88 | "1: ldrex %0, [%1]\n"
89 | " teq %0, #0\n"
90 | " strexeq %0, %2, [%1]\n"
91 | " teq %0, #0\n"
92 | " bne 1b"
93 | : "=&r" (tmp)
94 | : "r" (&rw->lock), "r" (0x80000000)
6d9b37a3 RK |
95 | : "cc");
96 |
97 | smp_mb();
1da177e4 LT |
98 | }
99 |
fb1c8f93 | 100 | static inline int __raw_write_trylock(raw_rwlock_t *rw)
4e8fd22b RK |
101 | {
102 | unsigned long tmp;
103 |
104 | __asm__ __volatile__(
105 | "1: ldrex %0, [%1]\n"
106 | " teq %0, #0\n"
107 | " strexeq %0, %2, [%1]"
108 | : "=&r" (tmp)
109 | : "r" (&rw->lock), "r" (0x80000000)
6d9b37a3 RK |
110 | : "cc");
111 |
112 | if (tmp == 0) {
113 | smp_mb();
114 | return 1;
115 | } else {
116 | return 0;
117 | }
4e8fd22b RK |
118 | }
119 |
fb1c8f93 | 120 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
1da177e4 | 121 | { |
6d9b37a3 RK |
122 | smp_mb();
123 |
1da177e4 LT |
124 | __asm__ __volatile__(
125 | "str %1, [%0]"
126 | :
127 | : "r" (&rw->lock), "r" (0)
6d9b37a3 | 128 | : "cc"); |
1da177e4 LT |
129 | }
130 |
131 | /*
132 | * Read locks are a bit more hairy:
133 | * - Exclusively load the lock value.
134 | * - Increment it.
135 | * - Store new lock value if positive, and we still own this location.
136 | * If the value is negative, we've already failed.
137 | * - If we failed to store the value, we want a negative result.
138 | * - If we failed, try again.
139 | * Unlocking is similarly hairy. We may have multiple read locks
140 | * currently active. However, we know we won't have any write
141 | * locks.
142 | */
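In C-level terms, the steps listed above amount to a compare-and-swap loop that refuses to increment a negative (write-locked) word. A hypothetical sketch, again with GCC __atomic builtins and the arm_lock_t type from earlier, not the kernel's code:

    /* Hypothetical sketch of the reader acquire described in the comment. */
    static void sketch_read_lock(arm_lock_t *l)
    {
            unsigned int old;
            do {
                    old = __atomic_load_n(&l->lock, __ATOMIC_RELAXED);
                    /* bit 31 set (negative) means write-locked: retry from the load */
            } while ((int)old < 0 ||
                     !__atomic_compare_exchange_n(&l->lock, &old, old + 1,
                                    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    }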
fb1c8f93 | 143 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
1da177e4 LT |
144 | {
145 | unsigned long tmp, tmp2;
146 |
147 | __asm__ __volatile__(
148 | "1: ldrex %0, [%2]\n"
149 | " adds %0, %0, #1\n"
150 | " strexpl %1, %0, [%2]\n"
151 | " rsbpls %0, %1, #0\n"
152 | " bmi 1b"
153 | : "=&r" (tmp), "=&r" (tmp2)
154 | : "r" (&rw->lock)
6d9b37a3 RK |
155 | : "cc");
156 |
157 | smp_mb();
1da177e4 LT |
158 | }
159 |
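The rsbpls line is the densest part: it only executes if the increment left a non-negative result, and it turns the strex status in %1 into the sign that bmi tests. A commented copy of the asm body, for reading only:

    "1:     ldrex   %0, [%2]\n"     /* tmp = lock word, take exclusive monitor    */
    "       adds    %0, %0, #1\n"   /* one more reader; N flag set if writer held */
    "       strexpl %1, %0, [%2]\n" /* if non-negative, try store; tmp2 = status  */
    "       rsbpls  %0, %1, #0\n"   /* tmp = 0 - tmp2: 0 if stored, -1 if lost    */
    "       bmi     1b"             /* writer held it, or store failed: retry     */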
fb1c8f93 | 160 | static inline void __raw_read_unlock(raw_rwlock_t *rw)
1da177e4 | 161 | { |
4e8fd22b RK |
162 | unsigned long tmp, tmp2;
163 |
6d9b37a3 RK |
164 | smp_mb();
165 |
1da177e4 LT |
166 | __asm__ __volatile__(
167 | "1: ldrex %0, [%2]\n"
168 | " sub %0, %0, #1\n"
169 | " strex %1, %0, [%2]\n"
170 | " teq %1, #0\n"
171 | " bne 1b"
172 | : "=&r" (tmp), "=&r" (tmp2)
173 | : "r" (&rw->lock)
6d9b37a3 | 174 | : "cc"); |
1da177e4 LT |
175 | }
176 |
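Releasing a read lock is just an atomic decrement with the barrier first, since other readers may still hold the lock and only the count changes; note there is no strexpl/condition dance here because a reader that got in can always get out. A hypothetical C-level equivalent, reusing arm_lock_t from above:

    /* Hypothetical sketch: drop one reader, with release ordering. */
    static void sketch_read_unlock(arm_lock_t *l)
    {
            __atomic_fetch_sub(&l->lock, 1, __ATOMIC_RELEASE);
    }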
fb1c8f93 | 177 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
1da177e4 | 178 | |
1da177e4 | 179 | #endif /* __ASM_SPINLOCK_H */ |