/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT	= 0x01,
	CSD_FLAG_ALLOC	= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

void __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
}

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		/*
		 * We need to see the flags store in the IPI handler
		 */
		smp_mb();
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		} else
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_mb();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
					  list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if
			 * flags == 0 (when called through
			 * generic_exec_single()), so save them away
			 * before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_mb();
	}
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	}

	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
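
/*
 * Usage sketch (illustrative only, not part of this file): run a fast,
 * non-blocking function on one remote CPU and wait for it to finish.
 * drain_local_stats() is a hypothetical callback:
 *
 *	static void drain_local_stats(void *info)
 *	{
 *		// runs in hardirq (IPI) context on the target CPU;
 *		// must not sleep or take sleeping locks
 *	}
 *
 *	smp_call_function_single(cpu, drain_local_stats, NULL, 1);
 */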

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and set up data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
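
/*
 * Usage sketch (illustrative only): embedding the call_single_data in a
 * caller-owned structure avoids any allocation on this path. struct
 * my_work and my_func() are hypothetical:
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		int payload;
 *	};
 *
 *	work->csd.func  = my_func;
 *	work->csd.info  = work;
 *	work->csd.flags = 0;
 *	__smp_call_function_single(cpu, &work->csd);
 *
 * With ->flags == 0 (no CSD_FLAG_WAIT, no CSD_FLAG_ALLOC) the caller must
 * keep @work valid until my_func() has run on the target CPU.
 */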

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	if (!wait) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (data)
			data->csd.flags = CSD_FLAG_ALLOC;
	}
	if (!data) {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
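
/*
 * Usage sketch (illustrative only): run a function on a set of other
 * CPUs and wait for completion. do_flush() is a hypothetical callback;
 * preemption must be disabled and interrupts enabled around the call:
 *
 *	cpumask_t mask = CPU_MASK_NONE;
 *
 *	cpu_set(1, mask);
 *	cpu_set(2, mask);
 *	smp_call_function_mask(mask, do_flush, NULL, 1);
 */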

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
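
/*
 * Usage sketch (illustrative only): broadcast to every other online CPU
 * and wait. flush_remote_caches() is a hypothetical callback that runs
 * in hardirq context on each of those CPUs:
 *
 *	smp_call_function(flush_remote_caches, NULL, 1);
 */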

/*
 * The ipi_call_lock*() helpers let arch code hold off smp_call_function()
 * senders, e.g. while a CPU is being brought online.
 */
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}