/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */
16 #ifndef _LINUX_PERCPU_DEFS_H
17 #define _LINUX_PERCPU_DEFS_H
/*
 * Per-CPU linker-section name suffixes.  On SMP, cacheline-shared and
 * page-level placement matters, so aligned variables get their own
 * "..shared_aligned" subsection; modules cannot use it because the module
 * percpu area does not provide the extra alignment, hence the MODULE
 * special case.  On UP only the "..shared_aligned" grouping for
 * explicitly-aligned variables is kept, and there is no "first" section.
 */
#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif
/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due the compiler generating the wrong code to access
 * that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

/*
 * Attributes for the dummy scope/uniqueness-enforcement variables used by
 * the weak percpu definitions below: placed in .discard so they never
 * reach the final image, and marked unused to silence warnings.
 */
#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))
/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)	do {					\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)
/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions when
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif
/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
/*
 * Intermodule exports for per-CPU variables.  sparse forgets about
 * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
 * noop if __CHECKER__.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
202 * Accessors and operations.
209 * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
210 * to prevent the compiler from making incorrect assumptions about the
211 * pointer value. The weird cast keeps both GCC and sparse happy.
213 #define SHIFT_PERCPU_PTR(__p, __offset) ({ \
214 __verify_pcpu_ptr((__p)); \
215 RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
219 * A percpu variable may point to a discarded regions. The following are
220 * established ways to produce a usable pointer from the percpu variable
223 #define per_cpu(var, cpu) \
224 (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
226 #define raw_cpu_ptr(ptr) arch_raw_cpu_ptr(ptr)
228 #ifdef CONFIG_DEBUG_PREEMPT
229 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
231 #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
234 #define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
235 #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
237 #else /* CONFIG_SMP */
239 #define VERIFY_PERCPU_PTR(__p) ({ \
240 __verify_pcpu_ptr((__p)); \
241 (typeof(*(__p)) __kernel __force *)(__p); \
244 #define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
245 #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
246 #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
247 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
248 #define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
250 #endif /* CONFIG_SMP */
/* keep until we have removed all uses of __this_cpu_ptr */
#define __this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	this_cpu_ptr(&var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

/* Pointer-based counterparts of get_cpu_var()/put_cpu_var(). */
#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
282 #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
284 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
287 #endif /* __ASSEMBLY__ */
288 #endif /* _LINUX_PERCPU_DEFS_H */