/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_WAIT		= 0x02,
};
struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu))) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask_ipi);
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	}

	return NOTIFY_OK;
}
static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}
static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}
static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
	if (wait)
		csd->flags |= CSD_FLAG_WAIT;

	/*
	 * The list addition should be visible before we send the IPI:
	 * the handler locks the list to pull the entry off it, relying on
	 * the normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct llist_node *entry, *next;

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	entry = llist_del_all(&__get_cpu_var(call_single_queue));
	entry = llist_reverse_order(entry);

	while (entry) {
		struct call_single_data *csd;

		next = entry->next;

		csd = llist_entry(entry, struct call_single_data, llist);
		csd->func(csd->info);
		csd_unlock(csd);

		entry = next;
	}
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *csd = &d;

			if (!wait)
				csd = &__get_cpu_var(csd_data);

			csd_lock(csd);

			csd->func = func;
			csd->info = info;
			generic_exec_single(cpu, csd, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
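/*
 * Illustrative usage sketch, excluded from the build: how a caller might
 * run a short, non-blocking helper on one particular CPU and wait for it
 * to finish. The helper and variable names below are hypothetical.
 */
#if 0
static void read_remote_cpu_id(void *info)
{
	/* runs on the target CPU, in IPI context with interrupts disabled */
	*(int *)info = smp_processor_id();
}

static int sample_remote_cpu(int cpu)
{
	int id = -1;
	int err;

	/* wait == 1: only return once the helper has run on @cpu */
	err = smp_call_function_single(cpu, read_remote_cpu_id, &id, 1);
	return err ? err : id;
}
#endif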
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
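/*
 * Illustrative usage sketch, excluded from the build: run a helper on
 * whichever CPU of a caller-supplied mask is cheapest to reach, following
 * the selection preference documented above. Names are hypothetical.
 */
#if 0
static void touch_local_state(void *info)
{
	/* fast, non-blocking work on whichever CPU was picked */
}

static int poke_any_of(const struct cpumask *mask)
{
	/* prefers the current CPU, then the local node, then any online CPU */
	return smp_call_function_any(mask, touch_local_state, NULL, 1);
}
#endif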
/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and set up call_single_data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @csd inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *csd,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		csd->func(csd->info);
		local_irq_restore(flags);
	} else {
		csd_lock(csd);
		generic_exec_single(cpu, csd, wait);
	}
	put_cpu();
}
EXPORT_SYMBOL_GPL(__smp_call_function_single);
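/*
 * Illustrative usage sketch, excluded from the build: embedding a
 * call_single_data inside a driver-private structure so the IPI can be
 * fired without any allocation at call time. The structure and helper
 * names are hypothetical; the embedded csd is assumed to start out
 * zero-initialised and quiescent.
 */
#if 0
struct remote_kick {
	struct call_single_data csd;
	unsigned long payload;
};

static void remote_kick_fn(void *info)
{
	struct remote_kick *rk = info;

	/* consume rk->payload on the target CPU */
}

static void send_remote_kick(struct remote_kick *rk, int cpu)
{
	rk->csd.func = remote_kick_fn;
	rk->csd.info = rk;
	/* wait == 0: rk must stay valid until remote_kick_fn() has run */
	__smp_call_function_single(cpu, &rk->csd, 0);
}
#endif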
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	/*
	 * After we put an entry into the list, cfd->cpumask may be cleared
	 * again when another CPU sends another IPI for a SMP function call,
	 * so cfd->cpumask will be zero.
	 */
	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
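/*
 * Illustrative usage sketch, excluded from the build: smp_call_function_many()
 * requires the caller to have preemption disabled, e.g. via get_cpu() /
 * put_cpu(), and only targets CPUs other than the local one. Names are
 * hypothetical.
 */
#if 0
static void drain_remote_state(void *info)
{
	/* fast, non-blocking per-CPU work */
}

static void drain_on(const struct cpumask *mask)
{
	get_cpu();	/* disable preemption around the many-CPU call */
	smp_call_function_many(mask, drain_remote_state, NULL, true);
	put_cpu();
}
#endif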
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);
/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
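/*
 * Example boot-line usage of the options above (illustrative values):
 *
 *	nosmp		- bring up only the boot CPU
 *	maxcpus=4	- bring up at most four CPUs at boot
 *	nr_cpus=8	- hard-limit nr_cpu_ids to eight possible CPUs
 */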
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
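/*
 * Illustrative usage sketch, excluded from the build: on_each_cpu() runs
 * the callback on every online CPU, including the local one, and can wait
 * for completion. Names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void reset_demo_counter(void *unused)
{
	this_cpu_write(demo_counter, 0);
}

static void reset_all_counters(void)
{
	/* wait == 1: every CPU has run reset_demo_counter() on return */
	on_each_cpu(reset_demo_counter, NULL, 1);
}
#endif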
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
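/*
 * Illustrative usage sketch, excluded from the build: restricting the
 * callback to an explicit set of CPUs; the local CPU is handled without
 * an IPI if it is in the mask. Names are hypothetical.
 */
#if 0
static void sync_per_cpu_state(void *info)
{
	/* fast, non-blocking work against *info */
}

static void sync_cpus(const struct cpumask *victims, void *state)
{
	on_each_cpu_mask(victims, sync_per_cpu_state, state, true);
}
#endif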
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline during
 * the call; it does not protect against CPUs coming online, so CPUs
 * going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
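/*
 * Illustrative usage sketch, excluded from the build: only CPUs for which
 * the predicate returns true receive the IPI; the predicate runs with
 * preemption disabled. Names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, demo_pending);

static bool demo_has_pending(int cpu, void *info)
{
	return per_cpu(demo_pending, cpu) != 0;
}

static void demo_flush(void *info)
{
	this_cpu_write(demo_pending, 0);
}

static void flush_pending_cpus(void)
{
	/* GFP_KERNEL: may sleep while allocating the internal cpumask */
	on_each_cpu_cond(demo_has_pending, demo_flush, NULL, true, GFP_KERNEL);
}
#endif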
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
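/*
 * Illustrative usage sketch, excluded from the build: the typical pattern
 * for the helper above is to publish a new callback pointer and then force
 * every CPU through a dummy IPI so none can still be running code reached
 * via the old pointer. Names are hypothetical.
 */
#if 0
static void (*demo_idle_hook)(void);

static void demo_set_idle_hook(void (*new_hook)(void))
{
	demo_idle_hook = new_hook;
	/* every CPU has left the old hook once this returns */
	kick_all_cpus_sync();
}
#endif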