/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}
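
/*
 * Illustrative sketch only (not in the original header): xchg semantics
 * make a natural one-shot "claim" primitive, since exactly one caller
 * observes the old nonzero value.  The helper name is hypothetical.
 */
static inline int example_claim_flag(atomic_t *flag)
{
	/* True only for whichever caller swaps the old value out first. */
	return atomic_xchg(flag, 0) != 0;
}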

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
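
/*
 * Illustrative sketch only: the canonical cmpxchg retry loop, here used
 * to build an atomic OR out of atomic_cmpxchg().  (A real __atomic_or()
 * helper is declared at the bottom of this file; this hypothetical
 * function is just an API usage example.)
 */
static inline void example_atomic_or(atomic_t *v, int mask)
{
	int old, cur = atomic_read(v);

	/* Retry whenever another cpu modified @v between read and cmpxchg. */
	while ((old = atomic_cmpxchg(v, cur, cur | mask)) != cur)
		cur = old;
}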

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @v's new value (@i + @v).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u);
}
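
/*
 * Illustrative sketch only: because __atomic_add_unless() returns the
 * old value, "take a reference unless the count already hit zero" falls
 * out directly.  The helper name is hypothetical.
 */
static inline int example_get_ref_unless_zero(atomic_t *refs)
{
	/* A nonzero old value means the increment actually happened. */
	return __atomic_add_unless(refs, 1, 0) != 0;
}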

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
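
/*
 * Concretely: the locked _atomic_xchg_add() sequence on another cpu is
 * "load @v; add; store @v".  A plain store to @v landing between that
 * load and store would be silently overwritten, so atomic_set() must go
 * through the same locking protocol as the other ops.
 */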

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @v's new value (@i + @v).
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
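
/*
 * Illustrative sketch only: the dec-and-test pattern the wrappers above
 * support, e.g. dropping the last reference to an object.  The names
 * here are hypothetical.
 */
static inline void example_put_ref64(atomic64_t *refs, void (*release)(void))
{
	/* Only the caller that decrements the count 1 -> 0 releases. */
	if (atomic64_dec_and_test(refs))
		release();
}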

/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then do a read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
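
/*
 * So a typical caller needing release-style ordering writes, e.g.:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&v);
 *
 * and omits any barrier afterwards, since the support routine's
 * trailing "mf" already orders the update.
 */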

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
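
/*
 * Illustrative sketch only: the real hash is implemented in the support
 * code (see __atomic_hashed_lock() below).  One plausible mapping,
 * assuming 4-byte-aligned words, indexes the table with the low pointer
 * bits above the alignment bits; the exact formula here is an assumption,
 * not the kernel's actual implementation.
 */
static inline int *example_hashed_lock(volatile void *v)
{
	unsigned long idx = ((unsigned long)v >> 2) & (ATOMIC_HASH_SIZE - 1);
	return &atomic_locks[idx];
}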
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm_32.S" to keep
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
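
/*
 * Illustrative usage (hypothetical caller): the assembly helpers below
 * return this pair, so a wrapper checks .err before trusting .val, e.g.:
 *
 *	struct __get_user gu = __atomic_xchg_add(p, lock, 1);
 *	if (gu.err)
 *		return gu.err;		// failed, e.g. bad user address
 *	return gu.val;			// old value on success
 */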
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */