```c
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teqeq   %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        smp_mb();
}
```
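The `ldrex`/`strexeq` sequence above is in effect a compare-and-swap retry loop: read the lock word exclusively, attempt the store only if the value read was zero, and start over if either the word was non-zero or the exclusive store failed. The `smp_mb()` issued after the loop provides the barrier the header comment calls for on weakly ordered ARMv6 memory. A rough user-space equivalent using C11 atomics might look like the sketch below; it is illustrative only, and the `demo_*` names and use of `<stdatomic.h>` are assumptions for the example, not part of this header.

```c
#include <stdatomic.h>

/* Illustrative stand-in for the kernel's spinlock_t. */
typedef struct { atomic_uint lock; } demo_spinlock_t;

/* Sketch of what the ldrex/strexeq loop achieves: spin until the
 * lock word atomically changes from 0 (unlocked) to 1 (locked).
 * Acquire ordering plays the role of the smp_mb() taken after
 * the lock in the kernel code. */
static inline void demo_spin_lock(demo_spinlock_t *l)
{
        unsigned int expected;

        do {
                expected = 0;   /* only succeed if currently unlocked */
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}
```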
```c
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"       ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        smp_mb();

        __asm__ __volatile__(
"       str     %1, [%0]"
        :
        : "r" (&lock->lock), "r" (0)
        : "cc");
}
```
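Trylock makes a single attempt rather than looping and reports success only when the exclusive store went through, while unlock needs no exclusive access at all: only the lock holder ever writes the release, so a barrier followed by a plain store of 0 is enough. Continuing the illustrative C11 sketch from above (the names are again hypothetical, not the kernel API):

```c
/* One-shot attempt: succeed only if the lock word was 0. */
static inline int demo_spin_trylock(demo_spinlock_t *l)
{
        unsigned int expected = 0;

        return atomic_compare_exchange_strong_explicit(&l->lock, &expected, 1,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

/* Release: order the critical-section accesses before the store,
 * then simply write 0 -- the holder is the only possible writer. */
static inline void demo_spin_unlock(demo_spinlock_t *l)
{
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}
```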
```c
/*
 * RWLOCKS
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED        (rwlock_t) { 0 }
#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)     (*((volatile unsigned int *)(x)) != 0)

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}
```
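As the comment says, a writer simply claims bit 31, and it can only do so when the whole word is zero, i.e. when there are no readers and no other writer. In the same illustrative C11 style as the earlier sketches (hypothetical names, not kernel code):

```c
/* Writer: spin until the whole lock word goes from 0 (no readers,
 * no writer) to 0x80000000 (bit 31 = write-locked). */
static inline void demo_write_lock(demo_spinlock_t *l)
{
        unsigned int expected;

        do {
                expected = 0;
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected,
                                                        0x80000000u,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}
```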
```c
static inline int _raw_write_trylock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
        "str    %1, [%0]"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");
}

/*
 * Read locks are a bit more hairy:
 * - Exclusively load the lock value.
 * - Increment it.
 * - Store new lock value if positive, and we still own this location.
 *   If the value is negative, we've already failed.
 * - If we failed to store the value, we want a negative result.
 * - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
"       rsbpls  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        smp_mb();
}
```
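A reader, by contrast, increments the word but only commits the increment while the sign bit is clear; if a writer holds bit 31 the value is negative and the reader keeps spinning, and the `rsbpls` turns a failed `strex` into a negative result so the same `bmi` retries both cases. Roughly, in the illustrative C11 form used above (hypothetical names):

```c
/* Reader: bump the count, but only while no writer holds bit 31. */
static inline void demo_read_lock(demo_spinlock_t *l)
{
        unsigned int old;

        for (;;) {
                old = atomic_load_explicit(&l->lock, memory_order_relaxed);
                if (old & 0x80000000u)
                        continue;       /* writer active: spin */
                if (atomic_compare_exchange_weak_explicit(&l->lock, &old, old + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        break;
        }
}
```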
```c
static inline void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        smp_mb();

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");
}
```
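Releasing a read lock must still be an atomic read-modify-write, because other readers may be adjusting the count at the same time, so unlike the spin and write unlocks it cannot use a plain store; it is an atomic decrement ordered after the critical section. A matching sketch, with the same hypothetical names:

```c
/* Reader release: atomically drop our count; release ordering stands
 * in for the smp_mb() issued before the decrement in the kernel code. */
static inline void demo_read_unlock(demo_spinlock_t *l)
{
        atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
}
```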
```c
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */
```