/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <mach_apic.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP.  see PII A1AP
 *   E2AP.  see PII A2AP
 *   E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP.  see PPro 1AP
 *   A2AP.  see PPro 2AP
 *   A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP.  Linux maps the APIC as non-cacheable
 *   2AP.  worked around in hardware
 *   3AP.  fixed by a microcode update in C0 and later steppings.
 *         Linux does not use excessive STARTUP_IPIs.
 *   4AP.  worked around in hardware
 *   5AP.  symmetric IO mode (normal Linux operation) not affected.
 *         'noapic' mode has vector 0xf filled out properly.
 *   6AP.  'noapic' mode might be affected - fixed in later steppings
 *   7AP.  We do not assume writes to the LVT deasserting IRQs
 *   8AP.  We do not enable low power mode (deep sleep) during MP bootup
 *   9AP.  We do not use mixed mode
 *
 * Pentium
 *   There is a marginal case where REP MOVS on 100MHz SMP
 *   machines with B stepping processors can fail. XXX should provide
 *   an L1cache=Writethrough or L1cache=off option.
 *
 *   B stepping CPUs may hang. There are hardware workarounds
 *   for this. We warn about it in case your board doesn't have the
 *   workarounds. Basically that's so I can tell anyone with a B stepping
 *   CPU and SMP problems "tough".
 *
 *   Specific items [From Pentium Processor Specification Update]
 *
 *   1AP.  Linux doesn't use remote read
 *   2AP.  Linux doesn't trust APIC errors
 *   3AP.  We work around this
 *   4AP.  Linux never generates 3 interrupts of the same priority
 *         to cause a lost local interrupt.
 *   5AP.  Remote read is never used
 *   6AP.  not affected - worked around in hardware
 *   7AP.  not affected - worked around in hardware
 *   8AP.  worked around in hardware - we get explicit CS errors if not
 *   9AP.  only 'noapic' mode affected. Might generate spurious
 *         interrupts, we log only the first one and count the
 *         rest silently.
 *   10AP. not affected - worked around in hardware
 *   11AP. Linux reads the APIC between writes to avoid this, as per
 *         the documentation. Make sure you preserve this as it affects
 *         the C stepping chips too.
 *   12AP. not affected - worked around in hardware
 *   13AP. not affected - worked around in hardware
 *   14AP. we always deassert INIT during bootup
 *   15AP. not affected - worked around in hardware
 *   16AP. not affected - worked around in hardware
 *   17AP. not affected - worked around in hardware
 *   18AP. not affected - worked around in hardware
 *   19AP. not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___
 * or are signal timing bugs worked around in hardware, and there is
 * nothing of note from the C stepping upwards.
 */
106 | ||
107 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, }; | |
108 | ||
109 | /* | |
110 | * the following functions deal with sending IPIs between CPUs. | |
111 | * | |
112 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. | |
113 | */ | |
114 | ||
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr |= APIC_DM_NMI;
		break;
	}
	return icr;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
161 | ||
162 | void fastcall send_IPI_self(int vector) | |
163 | { | |
164 | __send_IPI_shortcut(APIC_DEST_SELF, vector); | |
165 | } | |
166 | ||
167 | /* | |
168 | * This is only used on smaller machines. | |
169 | */ | |
170 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | |
171 | { | |
172 | unsigned long mask = cpus_addr(cpumask)[0]; | |
173 | unsigned long cfg; | |
174 | unsigned long flags; | |
175 | ||
176 | local_irq_save(flags); | |
f3705136 | 177 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); |
1da177e4 LT |
178 | /* |
179 | * Wait for idle. | |
180 | */ | |
181 | apic_wait_icr_idle(); | |
182 | ||
183 | /* | |
184 | * prepare target chip field | |
185 | */ | |
186 | cfg = __prepare_ICR2(mask); | |
187 | apic_write_around(APIC_ICR2, cfg); | |
188 | ||
189 | /* | |
190 | * program the ICR | |
191 | */ | |
192 | cfg = __prepare_ICR(0, vector); | |
193 | ||
194 | /* | |
195 | * Send the IPI. The write to APIC_ICR fires this off. | |
196 | */ | |
197 | apic_write_around(APIC_ICR, cfg); | |
198 | ||
199 | local_irq_restore(flags); | |
200 | } | |
201 | ||
202 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | |
203 | { | |
204 | unsigned long cfg, flags; | |
205 | unsigned int query_cpu; | |
206 | ||
207 | /* | |
208 | * Hack. The clustered APIC addressing mode doesn't allow us to send | |
209 | * to an arbitrary mask, so I do a unicasts to each CPU instead. This | |
210 | * should be modified to do 1 message per cluster ID - mbligh | |
211 | */ | |
212 | ||
213 | local_irq_save(flags); | |
214 | ||
215 | for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) { | |
216 | if (cpu_isset(query_cpu, mask)) { | |
217 | ||
218 | /* | |
219 | * Wait for idle. | |
220 | */ | |
221 | apic_wait_icr_idle(); | |
222 | ||
223 | /* | |
224 | * prepare target chip field | |
225 | */ | |
226 | cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu)); | |
227 | apic_write_around(APIC_ICR2, cfg); | |
228 | ||
229 | /* | |
230 | * program the ICR | |
231 | */ | |
232 | cfg = __prepare_ICR(0, vector); | |
233 | ||
234 | /* | |
235 | * Send the IPI. The write to APIC_ICR fires this off. | |
236 | */ | |
237 | apic_write_around(APIC_ICR, cfg); | |
238 | } | |
239 | } | |
240 | local_irq_restore(flags); | |
241 | } | |
242 | ||
243 | #include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */ | |
244 | ||
245 | /* | |
246 | * Smarter SMP flushing macros. | |
247 | * c/o Linus Torvalds. | |
248 | * | |
249 | * These mean you can really definitely utterly forget about | |
250 | * writing to user space from interrupts. (Its not allowed anyway). | |
251 | * | |
252 | * Optimizations Manfred Spraul <manfred@colorfullife.com> | |
253 | */ | |
254 | ||
static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm (unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}
355 | ||
356 | static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |
357 | unsigned long va) | |
358 | { | |
1da177e4 LT |
359 | /* |
360 | * A couple of (to be removed) sanity checks: | |
361 | * | |
1da177e4 LT |
362 | * - current CPU must not be in mask |
363 | * - mask must exist :) | |
364 | */ | |
365 | BUG_ON(cpus_empty(cpumask)); | |
1da177e4 LT |
366 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); |
367 | BUG_ON(!mm); | |
368 | ||
f3705136 ZM |
369 | /* If a CPU which we ran on has gone down, OK. */ |
370 | cpus_and(cpumask, cpumask, cpu_online_map); | |
371 | if (cpus_empty(cpumask)) | |
372 | return; | |
373 | ||
1da177e4 LT |
374 | /* |
375 | * i'm not happy about this global shared spinlock in the | |
376 | * MM hot path, but we'll see how contended it is. | |
377 | * Temporarily this turns IRQs off, so that lockups are | |
378 | * detected by the NMI watchdog. | |
379 | */ | |
380 | spin_lock(&tlbstate_lock); | |
381 | ||
382 | flush_mm = mm; | |
383 | flush_va = va; | |
384 | #if NR_CPUS <= BITS_PER_LONG | |
385 | atomic_set_mask(cpumask, &flush_cpumask); | |
386 | #else | |
387 | { | |
388 | int k; | |
389 | unsigned long *flush_mask = (unsigned long *)&flush_cpumask; | |
390 | unsigned long *cpu_mask = (unsigned long *)&cpumask; | |
391 | for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k) | |
392 | atomic_set_mask(cpu_mask[k], &flush_mask[k]); | |
393 | } | |
394 | #endif | |
395 | /* | |
396 | * We have to send the IPI only to | |
397 | * CPUs affected. | |
398 | */ | |
399 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | |
400 | ||
401 | while (!cpus_empty(flush_cpumask)) | |
402 | /* nothing. lockup detection does not belong here */ | |
403 | mb(); | |
404 | ||
405 | flush_mm = NULL; | |
406 | flush_va = 0; | |
407 | spin_unlock(&tlbstate_lock); | |
408 | } | |
409 | ||
410 | void flush_tlb_current_task(void) | |
411 | { | |
412 | struct mm_struct *mm = current->mm; | |
413 | cpumask_t cpu_mask; | |
414 | ||
415 | preempt_disable(); | |
416 | cpu_mask = mm->cpu_vm_mask; | |
417 | cpu_clear(smp_processor_id(), cpu_mask); | |
418 | ||
419 | local_flush_tlb(); | |
420 | if (!cpus_empty(cpu_mask)) | |
421 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | |
422 | preempt_enable(); | |
423 | } | |
424 | ||
425 | void flush_tlb_mm (struct mm_struct * mm) | |
426 | { | |
427 | cpumask_t cpu_mask; | |
428 | ||
429 | preempt_disable(); | |
430 | cpu_mask = mm->cpu_vm_mask; | |
431 | cpu_clear(smp_processor_id(), cpu_mask); | |
432 | ||
433 | if (current->active_mm == mm) { | |
434 | if (current->mm) | |
435 | local_flush_tlb(); | |
436 | else | |
437 | leave_mm(smp_processor_id()); | |
438 | } | |
439 | if (!cpus_empty(cpu_mask)) | |
440 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | |
441 | ||
442 | preempt_enable(); | |
443 | } | |
444 | ||
445 | void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |
446 | { | |
447 | struct mm_struct *mm = vma->vm_mm; | |
448 | cpumask_t cpu_mask; | |
449 | ||
450 | preempt_disable(); | |
451 | cpu_mask = mm->cpu_vm_mask; | |
452 | cpu_clear(smp_processor_id(), cpu_mask); | |
453 | ||
454 | if (current->active_mm == mm) { | |
455 | if(current->mm) | |
456 | __flush_tlb_one(va); | |
457 | else | |
458 | leave_mm(smp_processor_id()); | |
459 | } | |
460 | ||
461 | if (!cpus_empty(cpu_mask)) | |
462 | flush_tlb_others(cpu_mask, mm, va); | |
463 | ||
464 | preempt_enable(); | |
465 | } | |
129f6946 | 466 | EXPORT_SYMBOL(flush_tlb_page); |
1da177e4 LT |
467 | |
468 | static void do_flush_tlb_all(void* info) | |
469 | { | |
470 | unsigned long cpu = smp_processor_id(); | |
471 | ||
472 | __flush_tlb_all(); | |
473 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | |
474 | leave_mm(cpu); | |
475 | } | |
476 | ||
477 | void flush_tlb_all(void) | |
478 | { | |
479 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | |
480 | } | |
481 | ||
482 | /* | |
483 | * this function sends a 'reschedule' IPI to another CPU. | |
484 | * it goes straight through and wastes no time serializing | |
485 | * anything. Worst case is that we lose a reschedule ... | |
486 | */ | |
487 | void smp_send_reschedule(int cpu) | |
488 | { | |
f3705136 | 489 | WARN_ON(cpu_is_offline(cpu)); |
1da177e4 LT |
490 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); |
491 | } | |
492 | ||
493 | /* | |
494 | * Structure and data for smp_call_function(). This is designed to minimise | |
495 | * static memory requirements. It also looks cleaner. | |
496 | */ | |
497 | static DEFINE_SPINLOCK(call_lock); | |
498 | ||
499 | struct call_data_struct { | |
500 | void (*func) (void *info); | |
501 | void *info; | |
502 | atomic_t started; | |
503 | atomic_t finished; | |
504 | int wait; | |
505 | }; | |
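
/*
 * Each target CPU bumps ->started once it has picked up func/info, and,
 * if ->wait is set, bumps ->finished after func has run; the initiating
 * CPU spins on these counters.
 */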
506 | ||
6fe940d6 LS |
507 | void lock_ipi_call_lock(void) |
508 | { | |
509 | spin_lock_irq(&call_lock); | |
510 | } | |
511 | ||
512 | void unlock_ipi_call_lock(void) | |
513 | { | |
514 | spin_unlock_irq(&call_lock); | |
515 | } | |
516 | ||
78eef01b AM |
517 | static struct call_data_struct *call_data; |
518 | ||
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>>, are executing it, or
 * have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	struct call_data_struct data;
	int cpus;

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
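
/*
 * Usage sketch (drain_local_counters is an arbitrary example name): a
 * caller that wants every other CPU to run a fast, non-blocking helper
 * and then waits for completion might do
 *
 *	static void drain_local_counters(void *unused)
 *	{
 *		...
 *	}
 *
 *	smp_call_function(drain_local_counters, NULL, 1, 1);
 *
 * The helper runs in interrupt context on the remote CPUs, so it must
 * not sleep or take sleeping locks.
 */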

static void stop_this_cpu (void * dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for(;;) halt();
	for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
}

fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}
672 | ||
673 | /* | |
674 | * smp_call_function_single - Run a function on another CPU | |
675 | * @func: The function to run. This must be fast and non-blocking. | |
676 | * @info: An arbitrary pointer to pass to the function. | |
677 | * @nonatomic: Currently unused. | |
678 | * @wait: If true, wait until function has completed on other CPUs. | |
679 | * | |
680 | * Retrurns 0 on success, else a negative status code. | |
681 | * | |
682 | * Does not return until the remote CPU is nearly ready to execute <func> | |
683 | * or is or has executed. | |
684 | */ | |
685 | ||
686 | int smp_call_function_single (int cpu, void (*func) (void *info), void *info, | |
687 | int nonatomic, int wait) | |
688 | { | |
689 | /* prevent preemption and reschedule on another processor */ | |
690 | int me = get_cpu(); | |
691 | if (cpu == me) { | |
692 | WARN_ON(1); | |
693 | put_cpu(); | |
694 | return -EBUSY; | |
695 | } | |
696 | spin_lock_bh(&call_lock); | |
697 | __smp_call_function_single(cpu, func, info, nonatomic, wait); | |
698 | spin_unlock_bh(&call_lock); | |
699 | put_cpu(); | |
700 | return 0; | |
701 | } | |
702 | EXPORT_SYMBOL(smp_call_function_single); |