#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
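
/*
 * Example usage (a sketch; "my_counter" and "pcpu_stats" are
 * hypothetical, the former declared elsewhere with
 * DEFINE_PER_CPU(int, my_counter)):
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 *
 * get_cpu_var() disables preemption before the access and put_cpu_var()
 * re-enables it.  The _ptr variants do the same for dynamically
 * allocated percpu memory:
 *
 *	struct my_stats *stats = get_cpu_ptr(pcpu_stats);
 *	stats->events++;
 *	put_cpu_ptr(pcpu_stats);
 */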
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};
struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated.  Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
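
/*
 * Example usage (a sketch; "struct my_stats" and init_stats() are
 * hypothetical):
 *
 *	struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
 *	int cpu;
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		init_stats(per_cpu_ptr(stats, cpu));
 *	...
 *	free_percpu(stats);
 */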
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})
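
/*
 * For example, given "DEFINE_PER_CPU(int, x)" (hypothetical), a call
 * such as this_cpu_read(x) expands through __pcpu_size_call_return()
 * to the sizeof()-selected case, i.e. this_cpu_read_4(x) for a 4-byte
 * int.  sizeof() is a compile-time constant, so the compiler discards
 * the other branches entirely.
 */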
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})
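
/*
 * The layout requirement above is normally met by putting the two
 * members next to each other in a suitably aligned structure.  A
 * sketch ("struct pair" and its percpu instance are hypothetical):
 *
 *	struct pair {
 *		void		*ptr;
 *		unsigned long	seq;
 *	} __aligned(2 * sizeof(void *));
 *
 *	static DEFINE_PER_CPU(struct pair, p);
 *
 *	ok = this_cpu_cmpxchg_double(p.ptr, p.seq,
 *				     old_ptr, old_seq,
 *				     new_ptr, new_seq);
 */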
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access with respect to
 * other operations on the *same* processor.  The assumption is that
 * per cpu data is only accessed by a single processor instance (the
 * current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
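
/*
 * Example usage (a sketch; "nr_events" is a hypothetical counter):
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	this_cpu_inc(nr_events);
 *
 * The increment is a full read-modify-write that is safe against
 * preemption and interrupts: the generic fallback runs it under
 * raw_local_irq_save()/raw_local_irq_restore(), and archs may instead
 * supply a single uninterruptible instruction.
 */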
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif
#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	*raw_cpu_ptr(&(pcp)) op val;					\
	raw_local_irq_restore(flags);					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif
#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif
#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	raw_cpu_add(pcp, val);						\
	ret__ = raw_cpu_read(pcp);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif
#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
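
/*
 * A common pattern is an optimistic update loop (a sketch; "state"
 * and compute_next() are hypothetical):
 *
 *	static DEFINE_PER_CPU(unsigned long, state);
 *	unsigned long old, new;
 *
 *	do {
 *		old = this_cpu_read(state);
 *		new = compute_next(old);
 *	} while (this_cpu_cmpxchg(state, old, new) != old);
 */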
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
/*
 * Generic percpu operations for contexts where we do not want to do
 * any checks for preemption.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
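
/*
 * Example usage (a sketch; "nr_seen" is hypothetical): in a section
 * where the caller already provides the protection, e.g. with
 * preemption disabled, the cheaper raw form avoids the irq-save
 * overhead of the this_cpu variants:
 *
 *	preempt_disable();
 *	raw_cpu_inc(nr_seen);
 *	preempt_enable();
 */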
#ifndef raw_cpu_read
# ifndef raw_cpu_read_1
# define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_2
# define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_4
# define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_8
# define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
#endif

#define raw_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*raw_cpu_ptr(&(pcp)) op val;					\
} while (0)
#ifndef raw_cpu_write
# ifndef raw_cpu_write_1
# define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_2
# define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_4
# define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_8
# define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
#endif

#ifndef raw_cpu_add
# ifndef raw_cpu_add_1
# define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_2
# define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_4
# define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_8
# define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
#endif

#ifndef raw_cpu_sub
# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef raw_cpu_inc
# define raw_cpu_inc(pcp)	raw_cpu_add((pcp), 1)
#endif

#ifndef raw_cpu_dec
# define raw_cpu_dec(pcp)	raw_cpu_sub((pcp), 1)
#endif
#ifndef raw_cpu_and
# ifndef raw_cpu_and_1
# define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_2
# define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_4
# define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_8
# define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
#endif

#ifndef raw_cpu_or
# ifndef raw_cpu_or_1
# define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_2
# define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_4
# define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_8
# define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
#endif
#define raw_cpu_generic_add_return(pcp, val)				\
({									\
	raw_cpu_add(pcp, val);						\
	raw_cpu_read(pcp);						\
})

#ifndef raw_cpu_add_return
# ifndef raw_cpu_add_return_1
# define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_2
# define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_4
# define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_8
# define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# define raw_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#endif

#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)
#define raw_cpu_generic_xchg(pcp, nval)					\
({	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef raw_cpu_xchg
# ifndef raw_cpu_xchg_1
# define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_2
# define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_4
# define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_8
# define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# define raw_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
#endif
#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef raw_cpu_cmpxchg
# ifndef raw_cpu_cmpxchg_1
# define raw_cpu_cmpxchg_1(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_2
# define raw_cpu_cmpxchg_2(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_4
# define raw_cpu_cmpxchg_4(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_8
# define raw_cpu_cmpxchg_8(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define raw_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#endif
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (raw_cpu_read(pcp1) == (oval1) &&				\
	    raw_cpu_read(pcp2) == (oval2)) {				\
		raw_cpu_write(pcp1, (nval1));				\
		raw_cpu_write(pcp2, (nval2));				\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef raw_cpu_cmpxchg_double
# ifndef raw_cpu_cmpxchg_double_1
# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_2
# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_4
# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_8
# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
/*
 * Generic percpu operations for contexts that are safe from
 * preemption/interrupts.
 */
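
/*
 * Example usage (a sketch; "nr_dirty" is hypothetical): the __this_cpu
 * forms behave like the raw_cpu forms, but with CONFIG_DEBUG_PREEMPT
 * they additionally call __this_cpu_preempt_check() to verify that the
 * caller really cannot be preempted:
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_dirty);
 *	preempt_enable();
 */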
#ifndef __this_cpu_read
# define __this_cpu_read(pcp) \
	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
#endif

#ifndef __this_cpu_write
# define __this_cpu_write(pcp, val)					\
do { __this_cpu_preempt_check("write");					\
     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_add
# define __this_cpu_add(pcp, val)					\
do { __this_cpu_preempt_check("add");					\
     __pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# define __this_cpu_and(pcp, val)					\
do { __this_cpu_preempt_check("and");					\
     __pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_or
# define __this_cpu_or(pcp, val)					\
do { __this_cpu_preempt_check("or");					\
     __pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
} while (0)
#endif

#ifndef __this_cpu_add_return
# define __this_cpu_add_return(pcp, val) \
	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#ifndef __this_cpu_xchg
# define __this_cpu_xchg(pcp, nval) \
	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
#endif

#ifndef __this_cpu_cmpxchg
# define __this_cpu_cmpxchg(pcp, oval, nval) \
	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
#endif

#ifndef __this_cpu_cmpxchg_double
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
#endif
#endif /* __LINUX_PERCPU_H */