#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))
/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
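/*
 * Illustrative sketch (not part of the original header): bumping a static
 * per-cpu event counter with get_cpu_var()/put_cpu_var().  get_cpu_var()
 * disables preemption so the read-modify-write stays on one CPU, and
 * put_cpu_var() re-enables it.  The names below are hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, my_event_count);
 *
 *	static void count_my_event(void)
 *	{
 *		get_cpu_var(my_event_count)++;
 *		put_cpu_var(my_event_count);
 *	}
 */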
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;
struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};
struct pcpu_alloc_info {
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
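/*
 * Illustrative sketch (hedged, not part of the original header): a
 * dynamically allocated per-cpu counter.  Each CPU increments its own copy
 * cheaply; a reader walks all copies with per_cpu_ptr() to get the total.
 * The identifiers pkt_count, count_packet() and total_packets() are
 * hypothetical.
 *
 *	static unsigned long __percpu *pkt_count;
 *
 *	static int __init pkt_count_init(void)
 *	{
 *		pkt_count = alloc_percpu(unsigned long);
 *		return pkt_count ? 0 : -ENOMEM;
 *	}
 *
 *	static void count_packet(void)
 *	{
 *		this_cpu_inc(*pkt_count);
 *	}
 *
 *	static unsigned long total_packets(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(pkt_count, cpu);
 *		return sum;
 *	}
 *
 *	(and free_percpu(pkt_count) on teardown)
 */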
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long.  percpu_read() evaluates to an lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
# define percpu_read(var)					\
  ({								\
	typeof(var) *pr_ptr__ = &(var);				\
	typeof(var) pr_ret__;					\
	pr_ret__ = get_cpu_var(*pr_ptr__);			\
	put_cpu_var(*pr_ptr__);					\
	pr_ret__;						\
  })
#define __percpu_generic_to_op(var, val, op)			\
do {								\
	typeof(var) *pgto_ptr__ = &(var);			\
	get_cpu_var(*pgto_ptr__) op val;			\
	put_cpu_var(*pgto_ptr__);				\
} while (0)
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
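/*
 * Illustrative sketch (hedged, not part of the original header): using the
 * non-lvalue accessors on a static per-cpu scalar.  Both calls are atomic
 * w.r.t. preemption; archs may turn them into single instructions.
 * "my_stat" is a hypothetical variable.
 *
 *	DEFINE_PER_CPU(int, my_stat);
 *
 *	percpu_add(my_stat, 3);
 *	pr_debug("my_stat on this cpu: %d\n", percpu_read(my_stat));
 */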
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);
#define __pcpu_size_call_return(stem, variable)			\
({	typeof(variable) pscr_ret__;				\
	__verify_pcpu_ptr(&(variable));				\
	switch(sizeof(variable)) {				\
	case 1: pscr_ret__ = stem##1(variable);break;		\
	case 2: pscr_ret__ = stem##2(variable);break;		\
	case 4: pscr_ret__ = stem##4(variable);break;		\
	case 8: pscr_ret__ = stem##8(variable);break;		\
	default:						\
		__bad_size_call_parameter();break;		\
	}							\
	pscr_ret__;						\
})
#define __pcpu_size_call_return2(stem, variable, ...)		\
({								\
	typeof(variable) pscr2_ret__;				\
	__verify_pcpu_ptr(&(variable));				\
	switch(sizeof(variable)) {				\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:						\
		__bad_size_call_parameter(); break;		\
	}							\
	pscr2_ret__;						\
})
#define __pcpu_size_call(stem, variable, ...)			\
do {								\
	__verify_pcpu_ptr(&(variable));				\
	switch(sizeof(variable)) {				\
	case 1: stem##1(variable, __VA_ARGS__);break;		\
	case 2: stem##2(variable, __VA_ARGS__);break;		\
	case 4: stem##4(variable, __VA_ARGS__);break;		\
	case 8: stem##8(variable, __VA_ARGS__);break;		\
	default:						\
		__bad_size_call_parameter();break;		\
	}							\
} while (0)
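/*
 * Illustrative expansion (hedged): for a 4-byte per-cpu scalar,
 * this_cpu_read(var) goes through __pcpu_size_call_return(this_cpu_read_,
 * (var)) and the switch collapses at compile time to the single live case
 *
 *	pscr_ret__ = this_cpu_read_4(var);
 *
 * An unsupported size falls into the default branch and calls
 * __bad_size_call_parameter(), which has no definition, so the mistake
 * shows up as a link error.
 */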
/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe. Interrupts may occur. If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
#define _this_cpu_generic_read(pcp)				\
({	typeof(pcp) ret__;					\
	preempt_disable();					\
	ret__ = *this_cpu_ptr(&(pcp));				\
	preempt_enable();					\
	ret__;							\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif
#define _this_cpu_generic_to_op(pcp, val, op)			\
do {								\
	preempt_disable();					\
	*__this_cpu_ptr(&(pcp)) op val;				\
	preempt_enable();					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif
#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif
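/*
 * Illustrative sketch (hedged, not part of the original header): per-cpu
 * statistics updated from contexts that may be preempted.  this_cpu_inc()
 * provides the preemption protection itself (or maps to a single arch
 * instruction), so no explicit get_cpu()/put_cpu() is needed.
 * "nr_foo_events" and foo_event() are hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, nr_foo_events);
 *
 *	void foo_event(void)
 *	{
 *		this_cpu_inc(nr_foo_events);
 *	}
 */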
#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
# define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
# define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
# define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
# define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif
/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)			\
do {								\
	*__this_cpu_ptr(&(pcp)) op val;				\
} while (0)
#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif
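/*
 * Illustrative sketch (hedged): the __this_cpu_*() forms do no preemption
 * handling, so the caller must already be pinned to one CPU, e.g. between
 * get_cpu()/put_cpu() or in an interrupt handler where preemption is off.
 * "bytes_rx" and foo_irq() are hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, bytes_rx);
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		__this_cpu_add(bytes_rx, 64);
 *		return IRQ_HANDLED;
 *	}
 */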
#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
# define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
# define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
# define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif
#define _this_cpu_generic_add_return(pcp, val)			\
({								\
	typeof(pcp) ret__;					\
	preempt_disable();					\
	__this_cpu_add(pcp, val);				\
	ret__ = __this_cpu_read(pcp);				\
	preempt_enable();					\
	ret__;							\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
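/*
 * Illustrative sketch (hedged): this_cpu_inc_return() performs the update
 * and returns the new value of this CPU's copy as one preempt-safe
 * operation, e.g. for per-cpu sequence numbers.  "msg_seq" and
 * next_msg_seq() are hypothetical.
 *
 *	DEFINE_PER_CPU(int, msg_seq);
 *
 *	static int next_msg_seq(void)
 *	{
 *		return this_cpu_inc_return(msg_seq);
 *	}
 */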
#define __this_cpu_generic_add_return(pcp, val)			\
({								\
	__this_cpu_add(pcp, val);				\
	__this_cpu_read(pcp);					\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from another
 * processor (which one gets when using regular atomic operations).
 * They are guaranteed to be atomic vs. local interrupts and
 * preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
do {								\
	unsigned long flags;					\
	local_irq_save(flags);					\
	*__this_cpu_ptr(&(pcp)) op val;				\
	local_irq_restore(flags);				\
} while (0)
#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
# define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
# define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
# define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
# define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif
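/*
 * Illustrative sketch (hedged): irqsafe_cpu_inc() suits a counter touched
 * from both process context and a local interrupt handler; the generic
 * version brackets the RMW with local_irq_save()/local_irq_restore().  It
 * is still not safe against concurrent updates from other CPUs.
 * "rx_drops" is a hypothetical variable.
 *
 *	DEFINE_PER_CPU(unsigned long, rx_drops);
 *
 *	irqsafe_cpu_inc(rx_drops);
 */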
#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
# define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
# define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
# define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
# define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
# define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
# define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
# define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
# define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
# define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
# define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
# define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
# define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif
#endif /* __LINUX_PERCPU_H */