/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

/* This happens early in bootup, can't really do it better */
static void smp_tune_scheduling(void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize = cd->linesz * cd->sets * cd->ways;

	if (cachesize > max_cache_size)
		max_cache_size = cachesize;
}
63 | ||
64 | extern void __init calibrate_delay(void); | |
65 | extern ATTRIB_NORET void cpu_idle(void); | |
66 | ||
67 | /* | |
68 | * First C code run on the secondary CPUs after being started up by | |
69 | * the master. | |
70 | */ | |
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
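
/*
 * smp_call_lock serialises access to call_data, the shared descriptor
 * for the cross-CPU function call currently in flight.
 */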
100 | ||
101 | DEFINE_SPINLOCK(smp_call_lock); | |
102 | ||
103 | struct call_data_struct *call_data; | |
104 | ||

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler:
 *
 *             CPU A                               CPU B
 *  Disable interrupts
 *                                          smp_call_function()
 *                                          Take call_lock
 *                                          Send IPIs
 *                                          Wait for all cpus to acknowledge IPI
 *                                          CPU A has not responded, spin waiting
 *                                          for cpu A to respond, holding call_lock
 *  smp_call_function()
 *  Spin waiting for call_lock
 *  Deadlock                                Deadlock
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
		      int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
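
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * bump a counter on every other online CPU and wait for completion.
 *
 *	static atomic_t counter = ATOMIC_INIT(0);
 *
 *	static void do_bump(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	smp_call_function(do_bump, &counter, 1, 1);
 */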
178 | ||
41c594ab | 179 | |
1da177e4 LT |
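/*
 * IPI handler: run the function published in call_data on this CPU,
 * acknowledging it via call_data->started and, if requested, ->finished.
 */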
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}
205 | ||
206 | static void stop_this_cpu(void *dummy) | |
207 | { | |
208 | /* | |
209 | * Remove this CPU: | |
210 | */ | |
211 | cpu_clear(smp_processor_id(), cpu_online_map); | |
212 | local_irq_enable(); /* May need to service _machine_restart IPI */ | |
213 | for (;;); /* Wait if available. */ | |
214 | } | |
215 | ||
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}
237 | ||
238 | /* preload SMP state for boot cpu */ | |
239 | void __devinit smp_prepare_boot_cpu(void) | |
240 | { | |
241 | /* | |
242 | * This assumes that bootup is always handled by the processor | |
243 | * with the logic and physical number 0. | |
244 | */ | |
245 | __cpu_number_map[0] = 0; | |
246 | __cpu_logical_map[0] = 0; | |
247 | cpu_set(0, phys_cpu_present_map); | |
248 | cpu_set(0, cpu_online_map); | |
249 | cpu_set(0, cpu_callin_map); | |
250 | } | |
251 | ||
252 | /* | |
b727a602 RB |
253 | * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu |
254 | * and keep control until "cpu_online(cpu)" is set. Note: cpu is | |
255 | * physical, not logical. | |
1da177e4 | 256 | */ |
b282b6f8 | 257 | int __cpuinit __cpu_up(unsigned int cpu) |
1da177e4 LT |
258 | { |
259 | struct task_struct *idle; | |
260 | ||
261 | /* | |
b727a602 | 262 | * Processor goes to start_secondary(), sets online flag |
1da177e4 LT |
263 | * The following code is purely to make sure |
264 | * Linux can schedule processes on this slave. | |
265 | */ | |
266 | idle = fork_idle(cpu); | |
267 | if (IS_ERR(idle)) | |
b727a602 | 268 | panic(KERN_ERR "Fork failed for CPU %d", cpu); |
1da177e4 LT |
269 | |
270 | prom_boot_secondary(cpu, idle); | |
271 | ||
b727a602 RB |
272 | /* |
273 | * Trust is futile. We should really have timeouts ... | |
274 | */ | |
1da177e4 LT |
275 | while (!cpu_isset(cpu, cpu_callin_map)) |
276 | udelay(100); | |
277 | ||
278 | cpu_set(cpu, cpu_online_map); | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
1da177e4 LT |
283 | /* Not really SMP stuff ... */ |
284 | int setup_profiling_timer(unsigned int multiplier) | |
285 | { | |
286 | return 0; | |
287 | } | |
288 | ||
289 | static void flush_tlb_all_ipi(void *info) | |
290 | { | |
291 | local_flush_tlb_all(); | |
292 | } | |
293 | ||
void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}
319 | ||
320 | static inline void smp_on_each_tlb(void (*func) (void *info), void *info) | |
321 | { | |
322 | preempt_disable(); | |
323 | ||
324 | smp_on_other_tlbs(func, info); | |
325 | func(info); | |
326 | ||
327 | preempt_enable(); | |
328 | } | |
329 | ||
1da177e4 LT |
330 | /* |
331 | * The following tlb flush calls are invoked when old translations are | |
332 | * being torn down, or pte attributes are changing. For single threaded | |
333 | * address spaces, a new context is obtained on the current cpu, and tlb | |
334 | * context on other cpus are invalidated to force a new context allocation | |
335 | * at switch_mm time, should the mm ever be used on other cpus. For | |
336 | * multithreaded address spaces, intercpu interrupts have to be sent. | |
337 | * Another case where intercpu interrupts are required is when the target | |
338 | * mm might be active on another cpu (eg debuggers doing the flushes on | |
339 | * behalf of debugees, kswapd stealing pages from another process etc). | |
340 | * Kanoj 07/00. | |
341 | */ | |
342 | ||

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}
void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);