cpu_stop: implement stop_cpu[s]()
include/linux/stop_machine.h
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <asm/system.h>

#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)

/*
 * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
 * monopolization mechanism.  The caller can specify a non-sleeping
 * function to be executed on a single cpu or on multiple cpus,
 * preempting all other processes and monopolizing those cpus until
 * it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */

typedef int (*cpu_stop_fn_t)(void *arg);

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	void			*arg;
	struct cpu_stop_done	*done;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
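
/*
 * Illustrative usage sketch, not part of this header: queue a short,
 * non-sleeping callback on one online cpu with stop_one_cpu() and wait
 * for its result.  The callback and helper names below are hypothetical;
 * only the calling convention comes from the declarations above.
 */
static int clear_local_flag(void *arg)
{
	/* Runs on the target cpu, preempting everything else; must not sleep. */
	int *flag = arg;

	*flag = 0;
	return 0;
}

static int clear_flag_on_cpu(unsigned int cpu, int *flag)
{
	/* Synchronous variant: waits for the callback to finish and
	 * returns its return value (assumed; see stop_one_cpu_nowait()
	 * for the fire-and-forget form). */
	return stop_one_cpu(cpu, clear_local_flag, flag);
}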

/*
 * stop_machine "Bogolock": stop the entire machine, disable
 * interrupts.  This is a very heavy lock, which is equivalent to
 * grabbing every spinlock (and more).  So the "read" side to such a
 * lock is anything which disables preemption.
 */

/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 */
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
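
/*
 * Illustrative usage sketch, not part of this header: run an atomic
 * update while no cpu holds a spinlock or sits in a preempt-disabled
 * region.  The callback and helper names are hypothetical; the
 * stop_machine() signature is the one declared above.
 */
static int apply_patch(void *data)
{
	/* Executes with interrupts off on one cpu while the others spin. */
	unsigned long *word = data;

	*word |= 0x1;
	return 0;
}

static int patch_word_safely(unsigned long *word)
{
	/* NULL cpumask: the callback may run on any online cpu.  The
	 * callback's return value is propagated to the caller. */
	return stop_machine(apply_patch, word, NULL);
}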

/**
 * __stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This is a special version of the above, which assumes cpus
 * won't come or go while it's being called.  Used by cpu hotplug.
 */
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);

/**
 * stop_machine_create: create all stop_machine threads
 *
 * Description: This causes all stop_machine threads to be created before
 * stop_machine actually gets called.  This can be used by subsystems that
 * need a non-failing stop_machine infrastructure.
 */
int stop_machine_create(void);

/**
 * stop_machine_destroy: destroy all stop_machine threads
 *
 * Description: This causes all stop_machine threads which were created with
 * stop_machine_create to be destroyed again.
 */
void stop_machine_destroy(void);
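
/*
 * Illustrative usage sketch, not part of this header: a subsystem that
 * cannot tolerate stop_machine() failing at call time pre-creates the
 * stop_machine threads and releases them when it is done.  The function
 * names are hypothetical, and the error handling assumes
 * stop_machine_create() returns 0 on success.
 */
static int my_subsys_init(void)
{
	/* Preallocate the stop_machine threads up front. */
	int ret = stop_machine_create();

	if (ret)
		return ret;

	/* ... stop_machine() can now be used without the thread
	 * creation step failing ... */
	return 0;
}

static void my_subsys_exit(void)
{
	/* Balance the successful stop_machine_create() above. */
	stop_machine_destroy();
}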

#else

static inline int stop_machine(int (*fn)(void *), void *data,
			       const struct cpumask *cpus)
{
	int ret;
	local_irq_disable();
	ret = fn(data);
	local_irq_enable();
	return ret;
}

static inline int stop_machine_create(void) { return 0; }
static inline void stop_machine_destroy(void) { }

#endif	/* CONFIG_SMP */
#endif	/* _LINUX_STOP_MACHINE */