percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU computes the address of a per-cpu variable.
 *
 * Args:
 *	var - variable name
 *	reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *	PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
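
/*
 * Illustration (not part of the API): with CONFIG_SMP on 32-bit,
 * where __percpu_seg is fs and __percpu_mov_op is movl, the example
 * above expands to roughly
 *
 *	movl %fs:this_cpu_off, %ebx
 *	lea cpu_gdt_descr(%ebx), %ebx
 *
 * i.e. load this CPU's area offset, then form the variable's address.
 */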

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __this_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	__verify_pcpu_ptr(ptr);				\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
#else
#define __percpu_arg(x)		"%P" #x
#endif
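
/*
 * Sketch of what __this_cpu_ptr() generates on SMP: a single
 *
 *	add %gs:this_cpu_off, %reg	(%fs on 32-bit)
 *
 * folding the "load offset" and "add to pointer" steps of the
 * generic version into one instruction.
 */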

/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

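/*
 * Note on the "if (0)" block in percpu_to_op(): it is never executed
 * and generates no code, but the dead assignment pto_tmp__ = (val)
 * forces the compiler to check that val is assignable to typeof(var),
 * so type mismatches produce a warning at build time.
 */
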
/*
 * Generate a percpu add to memory instruction, and optimize the code
 * when 1 or -1 is added or subtracted, in which case inc/dec is used
 * instead of add.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)

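/*
 * Illustration of the pao_ID__ optimization (my_counter being a
 * hypothetical 4-byte per-cpu variable; output shown roughly, for
 * 64-bit SMP):
 *
 *	percpu_add_op(my_counter, 1);	-> incl %gs:my_counter
 *	percpu_add_op(my_counter, -1);	-> decl %gs:my_counter
 *	percpu_add_op(my_counter, 5);	-> addl $5, %gs:my_counter
 */
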
#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * Add return operation: add val to the per-cpu variable and return
 * the new value.
 */
#define percpu_add_return_op(var, val)				\
({								\
	typeof(var) paro_ret__ = val;				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("xaddb %0, "__percpu_arg(1)			\
			    : "+q" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 2:							\
		asm("xaddw %0, "__percpu_arg(1)			\
			    : "+r" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 4:							\
		asm("xaddl %0, "__percpu_arg(1)			\
			    : "+r" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 8:							\
		asm("xaddq %0, "__percpu_arg(1)			\
			    : "+re" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	paro_ret__ += val;					\
	paro_ret__;						\
})

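/*
 * Note on the above: xadd leaves the *old* value of the memory
 * operand in paro_ret__ and stores the sum in memory, so the final
 * "paro_ret__ += val" is what turns the old value into the new one
 * that an add_return operation is expected to return.
 */
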
/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed, while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)

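/*
 * Usage sketch (my_count is a hypothetical per-cpu variable):
 *
 *	DEFINE_PER_CPU(unsigned long, my_count);
 *	...
 *	percpu_write(my_count, 0);
 *	percpu_add(my_count, 3);
 *	unsigned long v = percpu_read(my_count);
 *
 * Each operation compiles to a single segment-prefixed instruction;
 * as noted above, none of them yields an lvalue.
 */
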
#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

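/*
 * The _1/_2/_4 (and, on 64-bit, _8) suffixed operations above and
 * below are not normally called directly; the generic this_cpu_*()
 * and __this_cpu_*() macros in linux/percpu.h select among them by
 * sizeof(pcp).
 */
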
#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

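/*
 * Note that the irqsafe_cpu_*() variants map to the same single
 * instructions as this_cpu_*(): a one-instruction read-modify-write
 * on a segment-based operand cannot be interrupted partway on the
 * local CPU, so no explicit interrupt disabling is needed here,
 * unlike in the generic fallbacks.
 */
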
#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#endif
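/*
 * The !CONFIG_M386 guard above reflects that xadd first appeared on
 * the 486; plain-386 builds fall back to the generic add_return
 * implementation.
 */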
/*
 * Per-cpu atomic 64-bit operations are only available under 64-bit.
 * 32-bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({								\
	int old__;						\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"	\
		     : "=r" (old__), "+m" (var)			\
		     : "dIr" (bit));				\
	old__;							\
})
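
/*
 * How the above works: btr copies the addressed bit into the carry
 * flag and clears it in memory; "sbbl %0,%0" then computes
 * old__ - old__ - CF, leaving 0 if the bit was clear and -1 if it
 * was set, so the old bit value is returned without a conditional
 * branch.
 */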

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

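/*
 * Usage sketch (my_cpu_id is a hypothetical early per-cpu variable,
 * analogous to the early APIC-id maps in the x86 setup code):
 *
 *	DEFINE_EARLY_PER_CPU(u16, my_cpu_id, 0);
 *	...
 *	id = early_per_cpu(my_cpu_id, cpu);
 *
 * While my_cpu_id_early_ptr is non-NULL, accesses go to the
 * __initdata my_cpu_id_early_map[]; once setup clears the pointer
 * after the per-cpu areas are allocated, the same expression reads
 * the real per_cpu(my_cpu_id, cpu).
 */
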
#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */