/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL       -1ULL

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
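
/*
 * Lazy TLB mode means this CPU is running a task (typically a kernel
 * thread) that merely borrows the previous task's mm and never touches
 * user addresses itself. When a flush IPI arrives in that state there is
 * nothing worth flushing, so leave_mm() simply detaches: clearing our bit
 * in cpu_vm_mask stops further flush IPIs for that mm, and loading
 * swapper_pg_dir drops the borrowed user page tables entirely.
 */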

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
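
/*
 * For reference, the 1a) steps above correspond roughly to a switch_mm()
 * of the following shape (an illustrative sketch only; the real
 * implementation lives in the mmu_context headers and differs in detail):
 *
 *      clear_bit(cpu, &prev->cpu_vm_mask);        (1a1)
 *      write_pda(mmu_state, TLBSTATE_OK);         (1a2)
 *      write_pda(active_mm, next);                (1a3)
 *      set_bit(cpu, &next->cpu_vm_mask);          (1a4)
 *      load_cr3(next->pgd);                       (1a5)
 */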

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

asmlinkage void smp_invalidate_interrupt(void)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        cpu_clear(cpu, flush_cpumask);
        put_cpu_no_resched();
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t tmp;
        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        cpus_and(tmp, cpumask, cpu_online_map);
        BUG_ON(!cpus_equal(tmp, cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        if (!mm)
                BUG();

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        cpus_or(flush_cpumask, cpumask, flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                mb();   /* nothing. lockup detection does not belong here */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
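
/*
 * Note on the handshake above: the sender publishes flush_mm/flush_va and
 * ORs the targets into flush_cpumask under tlbstate_lock, fires the
 * INVALIDATE_TLB_VECTOR IPI, and then spins until every target CPU has
 * cleared its own bit in smp_invalidate_interrupt(). The single global
 * flush_mm/flush_va/flush_cpumask triple is why concurrent flushers must
 * serialize on tlbstate_lock.
 */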

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
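
/*
 * Usage sketch (hypothetical caller, illustrative only): generic mm code
 * calls flush_tlb_page() after it has changed a single user PTE, roughly:
 *
 *      spin_lock(&mm->page_table_lock);
 *      set_pte(ptep, entry);
 *      spin_unlock(&mm->page_table_lock);
 *      flush_tlb_page(vma, address);
 *
 * where vma->vm_mm owns the page table and 'address' is the user virtual
 * address mapped by the PTE; the flush shoots down any stale TLB entries
 * for that address on all CPUs using the mm.
 */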

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (read_pda(mmu_state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
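
/*
 * on_each_cpu() runs do_flush_tlb_all() on the calling CPU itself and, via
 * smp_call_function(), on every other online CPU; the trailing "1, 1"
 * arguments are the retry and wait flags, so flush_tlb_all() does not
 * return until every CPU has flushed.
 */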

void smp_kdb_stop(void)
{
        send_IPI_allbutself(KDB_VECTOR);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct *call_data;

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 */
static void __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                                       int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = 1;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        wmb();
        /* Send a message to the target CPU and wait for it to respond */
        send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (!wait)
                return;

        while (atomic_read(&data.finished) != cpus)
                cpu_relax();
}

/*
 * smp_call_function_single - Run a function on another CPU
 * @cpu: The CPU to run the function on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * is executing it, or has already executed it.
 */

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int nonatomic, int wait)
{
        /* prevent preemption and reschedule on another processor */
        int me = get_cpu();
        if (cpu == me) {
                WARN_ON(1);
                put_cpu();
                return -EBUSY;
        }
        spin_lock_bh(&call_lock);
        __smp_call_function_single(cpu, func, info, nonatomic, wait);
        spin_unlock_bh(&call_lock);
        put_cpu();
        return 0;
}
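
/*
 * Usage sketch (hypothetical helper names, illustrative only):
 *
 *      static void read_remote_counter(void *info)
 *      {
 *              *(unsigned int *)info = some_percpu_counter;
 *      }
 *
 *      unsigned int value;
 *      if (smp_call_function_single(cpu, read_remote_counter, &value, 0, 1) == 0)
 *              ... use value ...
 *
 * The callback runs from the IPI handler on the target CPU, so it must be
 * fast and must not sleep; passing wait=1 keeps 'value' valid until the
 * callback has finished writing it.
 */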

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
                                int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (!cpus)
                return;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (!wait)
                return;

        while (atomic_read(&data.finished) != cpus)
                cpu_relax();
}

/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func, are executing it, or have
 * already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        spin_lock(&call_lock);
        __smp_call_function(func, info, nonatomic, wait);
        spin_unlock(&call_lock);
        return 0;
}
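
/*
 * Usage sketch (hypothetical helper, illustrative only): run a quick,
 * non-sleeping callback on every other online CPU and wait for it:
 *
 *      static void poke_cpu(void *unused)
 *      {
 *              ...
 *      }
 *
 *      smp_call_function(poke_cpu, NULL, 0, 1);
 *      poke_cpu(NULL);
 *
 * The caller covers its own CPU separately, which is the pattern that
 * on_each_cpu() wraps for flush_tlb_all() above.
 */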

void smp_stop_cpu(void)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        disable_local_APIC();
        local_irq_enable();
}

static void smp_really_stop_cpu(void *dummy)
{
        smp_stop_cpu();
        for (;;)
                asm("hlt");
}

void smp_send_stop(void)
{
        int nolock = 0;
        if (reboot_force)
                return;
        /* Don't deadlock on the call lock in panic */
        if (!spin_trylock(&call_lock)) {
                /* ignore locking because we have panicked anyway */
                nolock = 1;
        }
        __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
        if (!nolock)
                spin_unlock(&call_lock);

        local_irq_disable();
        disable_local_APIC();
        local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
        ack_APIC_irq();
}

asmlinkage void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}
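
/*
 * Handshake summary: the sender stack-allocates its call_data_struct, so
 * the receiver copies func/info/wait locally and only then bumps 'started',
 * which is the sender's permission to let that structure go out of scope
 * when wait==0. With wait==1 the sender also spins on 'finished', which is
 * bumped only after the callback has returned.
 */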

int safe_smp_processor_id(void)
{
        int apicid, i;

        if (disable_apic)
                return 0;

        apicid = hard_smp_processor_id();
        if (x86_cpu_to_apicid[apicid] == apicid)
                return apicid;

        for (i = 0; i < NR_CPUS; ++i) {
                if (x86_cpu_to_apicid[i] == apicid)
                        return i;
        }

        /*
         * No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
         * or called too early. Either way, we must be CPU 0.
         */
        if (x86_cpu_to_apicid[0] == BAD_APICID)
                return 0;

        return 0;       /* Should not happen */
}