/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition imposes the following two extra restrictions when
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif
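
/*
 * Scope-check sketch ("foo" is a hypothetical variable, not defined by
 * this header):
 *
 *	DECLARE_PER_CPU(int, foo);		// extern ... __pcpu_scope_foo
 *	static DEFINE_PER_CPU(int, foo);	// "static" lands on __pcpu_scope_foo
 *
 * The static definition of the __pcpu_scope_foo dummy following its
 * earlier extern declaration fails the build, catching a static
 * definition hiding behind a global declaration.
 */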

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access
 * by other CPUs are reduced by preventing the data from unnecessarily
 * spanning cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a no-op
 * when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
 * @ptr and is invoked once before a percpu area is accessed by all
 * accessors and operations.  This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
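
/*
 * Access sketch ("counter" is hypothetical; for_each_possible_cpu()
 * comes from linux/cpumask.h):
 *
 *	DEFINE_PER_CPU(long, counter);
 *	long total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu(counter, cpu);		// any CPU's copy
 *
 * per_cpu() wraps per_cpu_ptr() for the common lvalue case; use
 * this_cpu_ptr(&counter) when only the local CPU's copy is needed.
 */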

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't one.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&(pcp1));					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access with respect to other
 * operations on the *same* processor.  The assumption is that per cpu
 * data is only accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes.  E.g. provide this_cpu_add_2() to provide
 * per cpu atomic operations for 2 byte sized RMW actions.  If arch code
 * does not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemptions.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts, then one of these RMW operations can show
 * unexpected behavior because the execution thread was rescheduled on
 * another processor or an interrupt occurred and the same percpu
 * variable was modified from the interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
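
/*
 * Context sketch ("hits" is hypothetical): raw ops suit paths that
 * already hold off preemption themselves:
 *
 *	preempt_disable();
 *	raw_cpu_inc(hits);		// no redundant preemption check
 *	preempt_enable();
 *
 * Outside such a region, prefer this_cpu_inc(hits).
 */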

/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({									\
	__this_cpu_preempt_check("cmpxchg_double");			\
	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

/*
 * Operations with implied preemption protection.  These operations can be
 * used without worrying about preemption.  Note that interrupts may still
 * occur while an operation is in progress, and if the interrupt handler
 * modifies the variable too then RMW actions may not be reliable.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
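
/*
 * Usage sketch ("nr_events" is hypothetical): bump a per-CPU event
 * counter from any context without explicit preemption handling:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *	...
 *	this_cpu_inc(nr_events);
 *
 * The increment executes on whichever CPU the task is running on and
 * is protected against preemption for its duration.
 */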

#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */