arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)					\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;	\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
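
/*
 * Illustrative expansion (a sketch, not part of the original header):
 * on 32-bit SMP, __percpu_mov_op is movl and __percpu_seg is fs, so
 *
 *	PER_CPU(cpu_gdt_descr, %ebx)
 *
 * becomes roughly
 *
 *	movl %fs:this_cpu_off, %ebx
 *	lea cpu_gdt_descr(%ebx), %ebx
 *
 * i.e. this CPU's per-cpu offset is fetched through the segment
 * register and added to the variable's link-time address.
 */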

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%P" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
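
/*
 * Illustrative expansion (a sketch, not in the original header): with
 * a 4-byte per-cpu variable on 32-bit SMP, percpu_to_op("add", var, 1)
 * selects the "case 4" arm and emits roughly
 *
 *	addl $1, %fs:var
 *
 * The dead "if (0)" assignment above exists only so the compiler
 * type-checks val against the variable's type; it generates no code.
 */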

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})
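
/*
 * Illustrative expansion (a sketch, not in the original header):
 * percpu_from_op("mov", var, "m" (var)) with a 4-byte variable emits
 * roughly, on 32-bit SMP,
 *
 *	movl %fs:var, %eax	(the compiler picks the register)
 *
 * The caller supplies the input constraint, which is how percpu_read()
 * ("m") and percpu_read_stable() ("p") below get different caching
 * behaviour out of gcc.
 */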

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_to_op("add", var, val)
#define percpu_sub(var, val)		percpu_to_op("sub", var, val)
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
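
/*
 * Example use (an illustrative sketch; example_counter is a
 * hypothetical variable, not one declared anywhere in the tree):
 *
 *	DEFINE_PER_CPU(unsigned long, example_counter);
 *	...
 *	percpu_add(example_counter, 1);
 *	if (percpu_read(example_counter) > limit)
 *		percpu_write(example_counter, 0);
 */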

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
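
/*
 * Note (an assumption about the generic layer, not stated in this
 * header): the size-suffixed ops above are not called directly; the
 * generic this_cpu_*() wrappers in linux/percpu.h select one by
 * sizeof().  A sketch, with a hypothetical variable:
 *
 *	DEFINE_PER_CPU(int, example_hits);
 *	...
 *	__this_cpu_add(example_hits, 1);   // resolves to __this_cpu_add_4
 */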

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
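
/*
 * Example use (an illustrative sketch; example_pending is hypothetical,
 * and preemption must already be disabled, as noted above).  btr clears
 * the bit and leaves its old value in CF; sbbl turns CF into 0 or -1:
 *
 *	DEFINE_PER_CPU(unsigned long, example_pending);
 *	...
 *	if (x86_test_and_clear_bit_percpu(0, example_pending))
 *		;	// bit 0 was set for this CPU and is now clear
 */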

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
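
/*
 * Example use (a sketch; x86_cpu_to_apicid is one user of these macros
 * elsewhere in the tree, the surrounding details are assumptions):
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *	...
 *	u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *
 * Before the per-cpu areas are set up, the access goes through the
 * NR_CPUS-sized _early_map[]; once they are, the _early_ptr is cleared
 * and the ternary in early_per_cpu() falls through to per_cpu().
 */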

#else /* !CONFIG_SMP */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */