/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H
#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif
/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))
/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions on
 * percpu variable definitions:
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set the weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * The __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger a build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * The __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that a hidden weak symbol collision, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name
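
/*
 * Illustrative sketch, not part of this header, of how the dummy
 * variables catch misuse at build time.  The variable name "foo" is
 * hypothetical.  DECLARE_PER_CPU() emits an extern __pcpu_scope_foo, so
 * a later "static DEFINE_PER_CPU()" of the same name emits a static
 * __pcpu_scope_foo and the compiler rejects the static declaration
 * following the non-static one:
 *
 *	DECLARE_PER_CPU(int, foo);		(from a header)
 *	static DEFINE_PER_CPU(int, foo);	(build failure)
 */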
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif
/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")
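
/*
 * Illustrative usage, not part of this header; the name "pkt_count" is
 * hypothetical.  Define a per-CPU variable in one file, make it visible
 * elsewhere with DECLARE_PER_CPU(), and access it through the accessors
 * defined further down:
 *
 *	DEFINE_PER_CPU(unsigned long, pkt_count);	(in foo.c)
 *	DECLARE_PER_CPU(unsigned long, pkt_count);	(in foo.h)
 *
 *	this_cpu_inc(pkt_count);		(this CPU's instance)
 *	n = per_cpu(pkt_count, cpu);		(a specific CPU's instance)
 */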
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp
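
/*
 * Illustrative sketch, not part of this header, of the statistics
 * pattern described above; "net_stats" and struct pkt_stats are
 * hypothetical.  Each CPU updates only its own copy, and a reader
 * collates all copies:
 *
 *	struct pkt_stats { u64 rx; u64 tx; };
 *	DEFINE_PER_CPU_SHARED_ALIGNED(struct pkt_stats, net_stats);
 *
 *	u64 total_rx(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(net_stats, cpu).rx;
 *		return sum;
 *	}
 */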
#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned
/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)
/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
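
/*
 * Illustrative usage, not part of this header; the name "cpu_llc_ptr"
 * is hypothetical.  Read-mostly placement suits per-CPU data written
 * once during init and then only read in hot paths:
 *
 *	DEFINE_PER_CPU_READ_MOSTLY(struct cacheinfo *, cpu_llc_ptr);
 */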
/*
 * Intermodule exports for per-CPU variables.  sparse forgets about
 * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
 * noop if __CHECKER__.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)	do {					\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)
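
/*
 * Illustrative sketch, not part of this header: assigning @ptr to a
 * "const void __percpu *" is a no-op for percpu pointers but makes
 * sparse warn about an address space mismatch for plain kernel
 * pointers.  The names "good" and "bad" are hypothetical:
 *
 *	DEFINE_PER_CPU(int, good);
 *	int bad;
 *
 *	__verify_pcpu_ptr(&good);	(ok: int __percpu *)
 *	__verify_pcpu_ptr(&bad);	(sparse warning: wrong address space)
 */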
#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)	({				\
	__verify_pcpu_ptr((__p));					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
})

#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#define raw_cpu_ptr(ptr)	arch_raw_cpu_ptr(ptr)

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)	SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif
#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)	({					\
	__verify_pcpu_ptr((__p));					\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */
#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
#define __raw_get_cpu_var(var)	(*raw_cpu_ptr(&(var)))
#define __get_cpu_var(var)	(*this_cpu_ptr(&(var)))

/* keep until we have removed all uses of __this_cpu_ptr */
#define __this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
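
/*
 * Illustrative sketch, not part of this header: per_cpu_ptr() also
 * works on dynamically allocated percpu memory from alloc_percpu().
 * The variable "refs" is hypothetical:
 *
 *	int __percpu *refs = alloc_percpu(int);
 *	int cpu, sum = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(refs, cpu);
 *	free_percpu(refs);
 */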
/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)	(*({					\
	preempt_disable();						\
	this_cpu_ptr(&var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)	do {					\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

#define get_cpu_ptr(var)	({					\
	preempt_disable();						\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var)	do {					\
	(void)(var);							\
	preempt_enable();						\
} while (0)
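
/*
 * Illustrative usage, not part of this header; "my_counter" is
 * hypothetical.  get_cpu_var() disables preemption so the task cannot
 * migrate while it manipulates its CPU's copy; put_cpu_var()
 * re-enables it:
 *
 *	DEFINE_PER_CPU(long, my_counter);
 *
 *	get_cpu_var(my_counter)++;	(preemption off across the access)
 *	put_cpu_var(my_counter);
 */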
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})
/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes.  F.e. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2 byte sized RMW actions.  If arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 */
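
/*
 * Illustrative sketch, not part of this header: an arch override in
 * asm/percpu.h supplies a size-specific primitive, which the size-call
 * dispatchers above then select for 2-byte operands; "arch_add_2"
 * stands in for a real arch implementation:
 *
 *	#define this_cpu_add_2(pcp, val)	arch_add_2(pcp, val)
 *
 * Any size not overridden falls back to the generic implementation.
 */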
/*
 * Generic percpu operations for contexts where we do not want to do
 * any checks for preemption.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
#define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
#define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
#define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(val))
#define raw_cpu_inc(pcp)	raw_cpu_add((pcp), 1)
#define raw_cpu_dec(pcp)	raw_cpu_sub((pcp), 1)
#define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
#define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
#define raw_cpu_add_return(pcp, val)					\
	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)
#define raw_cpu_xchg(pcp, nval)						\
	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
#define raw_cpu_cmpxchg(pcp, oval, nval)				\
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
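
/*
 * Illustrative usage, not part of this header; "nr_events" is
 * hypothetical.  raw_cpu_*() is only safe when the caller has already
 * excluded preemption and, where relevant, interrupts:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	preempt_disable();
 *	raw_cpu_inc(nr_events);		(no preemption check performed)
 *	preempt_enable();
 */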
/*
 * Generic percpu operations for contexts that are safe from preemption/interrupts.
 */
#define __this_cpu_read(pcp)						\
	(__this_cpu_preempt_check("read"),				\
	 __pcpu_size_call_return(raw_cpu_read_, (pcp)))

#define __this_cpu_write(pcp, val)					\
do {	__this_cpu_preempt_check("write");				\
	__pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
} while (0)

#define __this_cpu_add(pcp, val)					\
do {	__this_cpu_preempt_check("add");				\
	__pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
} while (0)

#define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)

#define __this_cpu_and(pcp, val)					\
do {	__this_cpu_preempt_check("and");				\
	__pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
} while (0)

#define __this_cpu_or(pcp, val)						\
do {	__this_cpu_preempt_check("or");					\
	__pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
} while (0)

#define __this_cpu_add_return(pcp, val)					\
	(__this_cpu_preempt_check("add_return"),			\
	 __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#define __this_cpu_xchg(pcp, nval)					\
	(__this_cpu_preempt_check("xchg"),				\
	 __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
	(__this_cpu_preempt_check("cmpxchg"),				\
	 __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))

#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	(__this_cpu_preempt_check("cmpxchg_double"),			\
	 __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
/*
 * this_cpu_*() operations are used for accesses that must be done in a
 * preemption-safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable too
 * then RMW actions will not be reliable.
 */
#define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
#define this_cpu_add(pcp, val)	__pcpu_size_call(this_cpu_add_, (pcp), (val))
#define this_cpu_sub(pcp, val)	this_cpu_add((pcp), -(typeof(pcp))(val))
#define this_cpu_inc(pcp)	this_cpu_add((pcp), 1)
#define this_cpu_dec(pcp)	this_cpu_sub((pcp), 1)
#define this_cpu_and(pcp, val)	__pcpu_size_call(this_cpu_and_, (pcp), (val))
#define this_cpu_or(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
#define this_cpu_add_return(pcp, val)					\
	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
#define this_cpu_xchg(pcp, nval)					\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#define this_cpu_cmpxchg(pcp, oval, nval)				\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
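
/*
 * Illustrative usage, not part of this header; "rx_packets" is
 * hypothetical.  this_cpu_inc() is a single preemption-safe RMW, so no
 * explicit preempt_disable()/preempt_enable() is needed around it:
 *
 *	DEFINE_PER_CPU(unsigned long, rx_packets);
 *
 *	this_cpu_inc(rx_packets);	(safe even if the task migrates
 *					 right before or after the increment)
 */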
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
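
/*
 * Illustrative sketch, not part of this header, of the adjacency and
 * alignment rules checked by __pcpu_double_call_return_bool(); the
 * names are hypothetical.  Packing both words into one double-word
 * aligned struct guarantees the first member is suitably aligned and
 * the second follows it directly (the pattern used by e.g. the SLUB
 * allocator's freelist/tid pair):
 *
 *	struct pcp_pair {
 *		void *ptr;
 *		unsigned long seq;
 *	} __aligned(2 * sizeof(void *));
 *	DEFINE_PER_CPU(struct pcp_pair, pair);
 *
 *	bool ok = this_cpu_cmpxchg_double(pair.ptr, pair.seq,
 *					  old_ptr, old_seq,
 *					  new_ptr, new_seq);
 */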
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */