percpu: Fixup __this_cpu_xchg* operations
arch/x86/include/asm/percpu.h

#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *	var - variable name
 *	reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *	PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
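
/*
 * For illustration (a sketch, not code from this file): assembly code
 * typically uses PER_CPU_VAR() like
 *
 *	movq	PER_CPU_VAR(old_rsp), %rax
 *
 * which assembles to "movq %gs:old_rsp, %rax" on SMP; on UP the
 * segment prefix is dropped and the plain symbol is referenced.
 */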

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		percpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __this_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	__verify_pcpu_ptr(ptr);				\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
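
/*
 * For comparison (a sketch): the generic __this_cpu_ptr() must first
 * read this_cpu_off into a temporary register and then add it to the
 * pointer -- two instructions and a clobbered temp.  The version above
 * folds the load into one segment-relative "add %gs:this_cpu_off, %reg".
 */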
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%P" #x
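
/*
 * Example expansion: on 64-bit SMP builds __percpu_arg(1) becomes the
 * string "%%gs:%P1", so an asm template like "mov "__percpu_arg(1)",%0"
 * reads "mov %gs:%P1,%0" once emitted; on UP the prefix is empty and
 * only the bare "%P1" operand remains.
 */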

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
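
/*
 * The dead "if (0)" block above exists only so the compiler type-checks
 * (val) against the variable's type; it generates no code.  Usage
 * sketch, assuming a hypothetical per-cpu int "example_count":
 *
 *	percpu_to_op("mov", example_count, 42);
 *
 * selects the 4-byte case and emits a single "movl $42,%gs:example_count".
 */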

/*
 * Generate a percpu add to memory instruction and optimize code
 * if the value one is added or subtracted.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
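
/*
 * Illustrative expansions, assuming a hypothetical 64-bit per-cpu
 * counter "example_count":
 *
 *	percpu_add_op(example_count, 1);	// "incq %gs:example_count"
 *	percpu_add_op(example_count, -1);	// "decq %gs:example_count"
 *	percpu_add_op(example_count, 7);	// "addq $7, %gs:example_count"
 *
 * pao_ID__ is a compile-time constant, so the untaken branches are
 * eliminated entirely by the compiler.
 */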

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)			\
({							\
	typeof(var) paro_ret__ = val;			\
	switch (sizeof(var)) {				\
	case 1:						\
		asm("xaddb %0, "__percpu_arg(1)		\
		    : "+q" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 2:						\
		asm("xaddw %0, "__percpu_arg(1)		\
		    : "+r" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 4:						\
		asm("xaddl %0, "__percpu_arg(1)		\
		    : "+r" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 8:						\
		asm("xaddq %0, "__percpu_arg(1)		\
		    : "+re" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	paro_ret__ += val;				\
	paro_ret__;					\
})
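
/*
 * xadd leaves the memory operand's previous value in the register, so
 * the trailing "paro_ret__ += val" converts that into the post-add
 * value.  Sketch, with a hypothetical per-cpu int "example_count":
 *
 *	int new = percpu_add_return_op(example_count, 5);
 *	// example_count grew by 5 and "new" holds the updated value.
 */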

/*
 * xchg is implemented using cmpxchg without a lock prefix: a plain
 * xchg with a memory operand carries an implied lock prefix, which is
 * expensive and keeps the processor from prefetching cachelines.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "q" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})
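
/*
 * In C pseudo-form, the asm above (4-byte case) does roughly:
 *
 *	eax = var;				// plain load, no lock
 *	do {
 *		// cmpxchgl: if (var == eax) var = new; else eax = var;
 *	} while (!zero_flag);
 *
 * The retry can only trigger if something on this same CPU (e.g. an
 * interrupt handler) modified the variable between the load and the
 * cmpxchg.
 */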

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)			\
({								\
	typeof(var) pco_ret__;					\
	typeof(var) pco_old__ = (oval);				\
	typeof(var) pco_new__ = (nval);				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("cmpxchgb %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "q" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 2:							\
		asm("cmpxchgw %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 4:							\
		asm("cmpxchgl %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 8:							\
		asm("cmpxchgq %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	pco_ret__;						\
})
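
/*
 * Success is detected by comparing the returned value against the
 * expected one, as in this sketch (hypothetical per-cpu int
 * "example_state"):
 *
 *	old = percpu_cmpxchg_op(example_state, expected, new);
 *	if (old == expected)
 *		;	// the new value was stored
 *	else
 *		;	// no store; "old" is the current value
 */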

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)
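
/*
 * The difference is in the input constraint: "m" (var) makes the asm
 * depend on the variable's memory contents, forcing a reload at every
 * use, while "p" (&(var)) depends only on the (constant) address, so
 * gcc may merge repeated reads.  Sketch:
 *
 *	struct task_struct *a = percpu_read_stable(current_task);
 *	struct task_struct *b = percpu_read_stable(current_task);
 *	// gcc may emit one load for both; percpu_read() would reload
 *	// from %gs:current_task each time.
 */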

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define __this_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define __this_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
({									\
	char __ret;							\
	typeof(o1) __o1 = o1;						\
	typeof(o1) __n1 = n1;						\
	typeof(o2) __o2 = o2;						\
	typeof(o2) __n2 = n2;						\
	typeof(o2) __dummy = n2;					\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
		    : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
	__ret;								\
})

#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#endif /* CONFIG_X86_CMPXCHG64 */

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors so we must be
 * able to emulate it in software.  The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#ifdef CONFIG_SMP
#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
#else
#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
#endif
#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
({									\
	char __ret;							\
	typeof(o1) __o1 = o1;						\
	typeof(o1) __n1 = n1;						\
	typeof(o2) __o2 = o2;						\
	typeof(o2) __n2 = n2;						\
	typeof(o2) __dummy;						\
	alternative_io(CMPXCHG16B_EMU_CALL,				\
		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
		       "a"(__o1), "d"(__o2) : "memory");		\
	__ret;								\
})

#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
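
/*
 * Usage sketch for the double-word variants, assuming two adjacent,
 * 16-byte-aligned hypothetical per-cpu longs "example_lo"/"example_hi":
 *
 *	if (this_cpu_cmpxchg_double(example_lo, example_hi,
 *				    old_lo, old_hi, new_lo, new_hi))
 *		;	// both words were replaced in one shot
 *
 * alternative_io() patches in the real cmpxchg16b on processors with
 * X86_FEATURE_CX16 and calls this_cpu_cmpxchg16b_emu on the rest; the
 * emulation also depends on the 16-byte alignment rule above.
 */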

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
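
/*
 * btr copies the addressed bit into CF before clearing it, and
 * "sbbl %0,%0" broadcasts CF into old__ (0 or -1).  Safe-usage sketch,
 * assuming a hypothetical per-cpu bitmask "example_mask":
 *
 *	preempt_disable();
 *	if (x86_test_and_clear_bit_percpu(3, example_mask))
 *		;	// bit 3 was set and has now been cleared
 *	preempt_enable();
 */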

static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

	return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
}

static inline int x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
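
/*
 * The __builtin_constant_p() dispatch lets a constant bit number reduce
 * to a test of an immediate mask, while a runtime bit number goes
 * through "bt".  E.g., with a hypothetical per-cpu bitmap
 * "example_flags":
 *
 *	x86_this_cpu_test_bit(2, example_flags);	// constant: mask test
 *	x86_this_cpu_test_bit(n, example_flags);	// variable: bt + sbb
 */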


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
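
/*
 * Illustrative use (modeled on existing early users such as
 * x86_cpu_to_apicid; the names below are a sketch):
 *
 *	DEFINE_EARLY_PER_CPU(int, example_id, -1);
 *
 *	// Before the per_cpu areas exist, early_per_cpu() indexes the
 *	// static __initdata array; once _name##_early_ptr is cleared
 *	// during setup, the same expression resolves to the real
 *	// per-cpu area.
 *	early_per_cpu(example_id, cpu) = 42;
 */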

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */