/* include/linux/percpu-defs.h — per-CPU variable declaration/definition helpers */
1#ifndef _LINUX_PERCPU_DEFS_H
2#define _LINUX_PERCPU_DEFS_H
3
4/*
5 * Determine the real variable name from the name visible in the
6 * kernel sources.
7 */
8#define per_cpu_var(var) per_cpu__##var
9
/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due the compiler generating the wrong code to access
 * that section.
 */
#define __PCPU_ATTRS(sec)						\
	__attribute__((section(PER_CPU_BASE_SECTION sec)))		\
	PER_CPU_ATTRIBUTES

/* Attributes for the dummy scope/uniqueness symbols below: placed in
 * .discard so they never reach the final image, and marked unused to
 * silence warnings about them. */
#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))
/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions when
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) per_cpu__##name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) per_cpu__##name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables: no special section suffix, so they land in
 * the base per-CPU section.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables (placed in PER_CPU_FIRST_SECTION by the linker
 * script, ahead of all other per-CPU data).
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp
/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned regardless of SMP (unlike the SHARED_ALIGNED variants above,
 * which only align when CONFIG_SMP is set).
 */
#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned
/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")		\
	__aligned(PAGE_SIZE)
136/*
137 * Intermodule exports for per-CPU variables.
138 */
139#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
140#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
141
142
143#endif /* _LINUX_PERCPU_DEFS_H */