Commit | Line | Data |
---|---|---|
1da177e4 LT | 1 | #ifndef _LINUX_STOP_MACHINE |
1da177e4 LT | 2 | #define _LINUX_STOP_MACHINE |
1142d810 | 3 | |
1da177e4 | 4 | #include <linux/cpu.h> |
eeec4fad | 5 | #include <linux/cpumask.h> |
bb2eac66 | 6 | #include <linux/smp.h> |
1142d810 | 7 | #include <linux/list.h> |
1da177e4 | 8 | |
1142d810 TH | 9 | /* |
1142d810 TH | 10 | * stop_cpu[s]() is simplistic per-cpu maximum priority cpu |
1142d810 TH | 11 | * monopolization mechanism. The caller can specify a non-sleeping |
1142d810 TH | 12 | * function to be executed on a single or multiple cpus preempting all |
1142d810 TH | 13 | * other processes and monopolizing those cpus until it finishes. |
1142d810 TH | 14 | * |
1142d810 TH | 15 | * Resources for this mechanism are preallocated when a cpu is brought |
1142d810 TH | 16 | * up and requests are guaranteed to be served as long as the target |
1142d810 TH | 17 | * cpus are online. |
1142d810 TH | 18 | */ |
1142d810 TH | 19 | typedef int (*cpu_stop_fn_t)(void *arg); |
1142d810 TH | 20 | |
bbf1bb3e TH | 21 | #ifdef CONFIG_SMP |
bbf1bb3e TH | 22 | |
1142d810 TH | 23 | struct cpu_stop_work { |
1142d810 TH | 24 | struct list_head list; /* cpu_stopper->works */ |
1142d810 TH | 25 | cpu_stop_fn_t fn; |
1142d810 TH | 26 | void *arg; |
1142d810 TH | 27 | struct cpu_stop_done *done; |
1142d810 TH | 28 | }; |
1142d810 TH | 29 | |
1142d810 TH | 30 | int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); |
1be0bd77 | 31 | int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); |
1b034bd9 | 32 | bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, |
1142d810 TH | 33 | struct cpu_stop_work *work_buf); |
1142d810 TH | 34 | int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); |
1142d810 TH | 35 | int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); |
233e7f26 | 36 | void stop_machine_park(int cpu); |
c00166d8 | 37 | void stop_machine_unpark(int cpu); |
1142d810 | 38 | |
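
The comment at lines 9-18 describes the cpu_stop interface declared above. As a minimal, hedged sketch of how a caller might use `stop_one_cpu()` (the names `poke_arg`, `poke_fn` and `poke_cpu` are invented for this illustration and are not part of the header): the call queues a non-sleeping callback on the target CPU's stopper and blocks until it has run, returning the callback's return value, or -ENOENT if the CPU is not online.

```c
/* Sketch only: run a short, non-sleeping callback on one CPU while that
 * CPU is monopolized by the stopper. All names below are hypothetical. */
#include <linux/stop_machine.h>

struct poke_arg {
        int value;
};

static int poke_fn(void *data)
{
        struct poke_arg *arg = data;

        /* Runs on the target CPU with every other task preempted; must not sleep. */
        arg->value++;
        return 0;
}

static int poke_cpu(unsigned int cpu)
{
        struct poke_arg arg = { .value = 0 };

        /* Blocks until poke_fn() has completed on @cpu; -ENOENT if @cpu is offline. */
        return stop_one_cpu(cpu, poke_fn, &arg);
}
```

Commit | Line | Data |
---|---|---|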
bbf1bb3e TH | 39 | #else /* CONFIG_SMP */ |
bbf1bb3e TH | 40 | |
bbf1bb3e TH | 41 | #include <linux/workqueue.h> |
bbf1bb3e TH | 42 | |
bbf1bb3e TH | 43 | struct cpu_stop_work { |
bbf1bb3e TH | 44 | struct work_struct work; |
bbf1bb3e TH | 45 | cpu_stop_fn_t fn; |
bbf1bb3e TH | 46 | void *arg; |
bbf1bb3e TH | 47 | }; |
bbf1bb3e TH | 48 | |
bbf1bb3e TH | 49 | static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) |
bbf1bb3e TH | 50 | { |
bbf1bb3e TH | 51 | int ret = -ENOENT; |
bbf1bb3e TH | 52 | preempt_disable(); |
bbf1bb3e TH | 53 | if (cpu == smp_processor_id()) |
bbf1bb3e TH | 54 | ret = fn(arg); |
bbf1bb3e TH | 55 | preempt_enable(); |
bbf1bb3e TH | 56 | return ret; |
bbf1bb3e TH | 57 | } |
bbf1bb3e TH | 58 | |
bbf1bb3e TH | 59 | static void stop_one_cpu_nowait_workfn(struct work_struct *work) |
bbf1bb3e TH | 60 | { |
bbf1bb3e TH | 61 | struct cpu_stop_work *stwork = |
bbf1bb3e TH | 62 | container_of(work, struct cpu_stop_work, work); |
bbf1bb3e TH | 63 | preempt_disable(); |
bbf1bb3e TH | 64 | stwork->fn(stwork->arg); |
bbf1bb3e TH | 65 | preempt_enable(); |
bbf1bb3e TH | 66 | } |
bbf1bb3e TH | 67 | |
1b034bd9 | 68 | static inline bool stop_one_cpu_nowait(unsigned int cpu, |
bbf1bb3e TH | 69 | cpu_stop_fn_t fn, void *arg, |
bbf1bb3e TH | 70 | struct cpu_stop_work *work_buf) |
bbf1bb3e TH | 71 | { |
bbf1bb3e TH | 72 | if (cpu == smp_processor_id()) { |
bbf1bb3e TH | 73 | INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); |
bbf1bb3e TH | 74 | work_buf->fn = fn; |
bbf1bb3e TH | 75 | work_buf->arg = arg; |
bbf1bb3e TH | 76 | schedule_work(&work_buf->work); |
1b034bd9 | 77 | return true; |
bbf1bb3e | 78 | } |
1b034bd9 ON | 79 | |
1b034bd9 ON | 80 | return false; |
bbf1bb3e TH | 81 | } |
bbf1bb3e TH | 82 | |
bbf1bb3e TH | 83 | static inline int stop_cpus(const struct cpumask *cpumask, |
bbf1bb3e TH | 84 | cpu_stop_fn_t fn, void *arg) |
bbf1bb3e TH | 85 | { |
bbf1bb3e TH | 86 | if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) |
bbf1bb3e TH | 87 | return stop_one_cpu(raw_smp_processor_id(), fn, arg); |
bbf1bb3e TH | 88 | return -ENOENT; |
bbf1bb3e TH | 89 | } |
bbf1bb3e TH | 90 | |
bbf1bb3e TH | 91 | static inline int try_stop_cpus(const struct cpumask *cpumask, |
bbf1bb3e TH | 92 | cpu_stop_fn_t fn, void *arg) |
bbf1bb3e TH | 93 | { |
bbf1bb3e TH | 94 | return stop_cpus(cpumask, fn, arg); |
bbf1bb3e TH | 95 | } |
bbf1bb3e TH | 96 | |
bbf1bb3e TH | 97 | #endif /* CONFIG_SMP */ |
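
Both the SMP declaration (line 32) and the UP fallback above hand `stop_one_cpu_nowait()` a caller-provided `struct cpu_stop_work`, so the buffer must stay valid until the callback has run. A hedged sketch under that assumption; `poke_work`, `poke_async_fn` and `poke_cpu_async` are invented names, not part of the header.

```c
/* Sketch only: fire-and-forget variant. The caller owns the cpu_stop_work
 * buffer, which must outlive the callback. Names are hypothetical. */
#include <linux/printk.h>
#include <linux/stop_machine.h>

static struct cpu_stop_work poke_work;  /* must stay valid until poke_async_fn() runs */

static int poke_async_fn(void *data)
{
        /* Non-sleeping work, executed later on the target CPU. */
        return 0;
}

static void poke_cpu_async(unsigned int cpu)
{
        /* Returns immediately; true means the work was queued for @cpu. */
        if (!stop_one_cpu_nowait(cpu, poke_async_fn, NULL, &poke_work))
                pr_warn("cpu%u: stopper not available\n", cpu);
}
```

Commit | Line | Data |
---|---|---|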
bbf1bb3e TH | 98 | |
1142d810 TH | 99 | /* |
1142d810 TH | 100 | * stop_machine "Bogolock": stop the entire machine, disable |
1142d810 TH | 101 | * interrupts. This is a very heavy lock, which is equivalent to |
1142d810 TH | 102 | * grabbing every spinlock (and more). So the "read" side to such a |
1816315b | 103 | * lock is anything which disables preemption. |
1142d810 | 104 | */ |
86fffe4a | 105 | #if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) |
1142d810 | 106 | |
1da177e4 | 107 | /** |
eeec4fad | 108 | * stop_machine: freeze the machine on all CPUs and run this function |
1da177e4 LT | 109 | * @fn: the function to run |
1da177e4 LT | 110 | * @data: the data ptr for the @fn() |
eeec4fad | 111 | * @cpus: the cpus to run the @fn() on (NULL = any online cpu) |
1da177e4 | 112 | * |
ffdb5976 | 113 | * Description: This causes a thread to be scheduled on every cpu, |
25985edc | 114 | * each of which disables interrupts. The result is that no one is |
ffdb5976 RR | 115 | * holding a spinlock or inside any other preempt-disabled region when |
ffdb5976 RR | 116 | * @fn() runs. |
1da177e4 LT | 117 | * |
1da177e4 LT | 118 | * This can be thought of as a very heavy write lock, equivalent to |
1da177e4 LT | 119 | * grabbing every spinlock in the kernel. */ |
9a301f22 | 120 | int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); |
1da177e4 | 121 | |
9a301f22 | 122 | int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, |
f740e6cd | 123 | const struct cpumask *cpus); |
86fffe4a | 124 | #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ |
1da177e4 | 125 | |
9a301f22 | 126 | static inline int stop_machine(cpu_stop_fn_t fn, void *data, |
087a4eb5 | 127 | const struct cpumask *cpus) |
1da177e4 | 128 | { |
f740e6cd | 129 | unsigned long flags; |
1da177e4 | 130 | int ret; |
f740e6cd | 131 | local_irq_save(flags); |
1da177e4 | 132 | ret = fn(data); |
f740e6cd | 133 | local_irq_restore(flags); |
1da177e4 LT | 134 | return ret; |
1da177e4 LT | 135 | } |
9ea09af3 | 136 | |
9a301f22 | 137 | static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, |
f740e6cd TH | 138 | const struct cpumask *cpus) |
f740e6cd TH | 139 | { |
7eeb088e | 140 | return stop_machine(fn, data, cpus); |
f740e6cd TH | 141 | } |
f740e6cd TH | 142 | |
86fffe4a | 143 | #endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ |
bbf1bb3e | 144 | #endif /* _LINUX_STOP_MACHINE */ |