#ifndef __ASM_SPINLOCK_LNKGET_H
#define __ASM_SPINLOCK_LNKGET_H

/*
 * None of the LNKGET/LNKSET asm statements below clobber memory: LNKSET
 * writes around the cache, so the memory it modifies cannot safely be
 * read by any means other than these accessors.  (The plain SETD stores
 * used for unlocking do carry a "memory" clobber.)
 */

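/*
 * Every acquire loop below has the same load-linked/store-conditional
 * shape: LNKGETD performs an exclusive load, a conditional LNKSET
 * attempts the store, and the DEFR/ANDT/CMPT sequence reads TXSTAT to
 * confirm the store actually went through (status 0x02000000 after
 * masking with 0x3f000000); if not, the sequence retries.  As a rough
 * portable-C illustration only (these builtins are not used by this
 * header, and compute()/acceptable() stand in for each lock's policy),
 * the pattern corresponds to:
 *
 *	do {
 *		old = __atomic_load_n(&word, __ATOMIC_RELAXED);
 *		new = compute(old);
 *	} while (!acceptable(old) ||
 *		 !__atomic_compare_exchange_n(&word, &old, new, false,
 *					      __ATOMIC_ACQUIRE,
 *					      __ATOMIC_RELAXED));
 */
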
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	int ret;

	/*
	 * TST sets the Z flag when bit 0 is clear (unlocked); MOV
	 * speculatively sets ret to 1, and XORZ clears it again if the
	 * lock was free, so ret ends up holding the locked state.
	 */
	asm volatile ("LNKGETD	%0, [%1]\n"
		      "TST	%0, #1\n"
		      "MOV	%0, #1\n"
		      "XORZ	%0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&lock->lock)
		      : "cc");
	return ret;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int tmp;

	/* Spin until bit 0 is clear and our LNKSETDZ store succeeds. */
	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	TST	%0, #1\n"
		      "	ADD	%0, %0, #1\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();
}

/* Returns 0 if failed to acquire lock */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;

	/* A single LNKGET/LNKSET attempt; no retry loop on contention. */
	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	TST	%0, #1\n"
		      "	ADD	%0, %0, #1\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "1:	XORNZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

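/*
 * As a rough portable-C illustration only (these builtins are not used
 * by this header), the trylock above behaves like a single
 * compare-and-swap of the free value for the held one:
 *
 *	int expected = 0;
 *	return __atomic_compare_exchange_n(&lock->lock, &expected, 1,
 *					   false, __ATOMIC_ACQUIRE,
 *					   __ATOMIC_RELAXED);
 */
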
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	asm volatile ("	SETD	[%0], %1\n"
		      :
		      : "da" (&lock->lock), "da" (0)
		      : "memory");
}

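/*
 * No LNKGET/LNKSET round trip is needed to unlock: the lock is held
 * exclusively, so a plain SETD store of zero suffices.  Together with
 * the preceding smp_mb() this roughly amounts to a release store
 * (illustrative C only, not used by this header):
 *
 *	__atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE);
 */
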
/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

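/*
 * Illustrative C shape of the write-lock fast path (a sketch, not part
 * of this header): move the lock word from 0 to bit 31 in one atomic
 * step, retrying while the word is non-zero:
 *
 *	unsigned int expected = 0;
 *	while (!__atomic_compare_exchange_n(&rw->lock, &expected,
 *					    0x80000000, false,
 *					    __ATOMIC_ACQUIRE,
 *					    __ATOMIC_RELAXED))
 *		expected = 0;
 */
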
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int tmp;

	/* Spin until the word is zero, then store it with bit 31 set. */
	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	CMP	%0, #0\n"
		      "	ADD	%0, %0, %2\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	CMP	%0, #0\n"
		      "	ADD	%0, %0, %2\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "1:	XORNZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();

	return tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	asm volatile ("	SETD	[%0], %1\n"
		      :
		      : "da" (&rw->lock), "da" (0)
		      : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	int ret;

	asm volatile ("LNKGETD	%0, [%1]\n"
		      "CMP	%0, #0\n"
		      "MOV	%0, #1\n"
		      "XORNZ	%0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&rw->lock)
		      : "cc");
	return ret;
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
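/*
 * As a portable-C illustration of the loop below (a sketch, not part of
 * this header), a reader increments the count and only commits the
 * store while the result stays non-negative, i.e. while no writer holds
 * bit 31:
 *
 *	int old, new;
 *	do {
 *		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
 *		new = old + 1;
 *	} while (new < 0 ||
 *		 !__atomic_compare_exchange_n(&rw->lock, &old, new, false,
 *					      __ATOMIC_ACQUIRE,
 *					      __ATOMIC_RELAXED));
 */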
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int tmp;

	/* LNKSETDPL only stores while the incremented count stays >= 0. */
	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	ADDS	%0, %0, #1\n"
		      "	LNKSETDPL [%1], %0\n"
		      "	BMI	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();
}

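/*
 * Unlike arch_spin_unlock(), releasing a read lock still needs the full
 * LNKGET/LNKSET loop: other readers may be adjusting the count
 * concurrently, so the decrement must be atomic rather than a plain
 * SETD store.
 */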
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int tmp;

	smp_mb();

	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	SUB	%0, %0, #1\n"
		      "	LNKSETD	[%1], %0\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	ADDS	%0, %0, #1\n"
		      "	LNKSETDPL [%1], %0\n"
		      "	BMI	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "	BZ	2f\n"
		      "1:	MOV	%0, #0\n"
		      "2:\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

/* read_can_lock - would read_trylock() succeed? */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("LNKGETD	%0, [%1]\n"
		      "CMP	%0, %2\n"
		      "MOV	%0, #1\n"
		      "XORZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");
	return tmp;
}

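/*
 * To summarise the rwlock word encoding used above:
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31)
 *	n > 0		held by n readers
 */
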
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_LNKGET_H */