/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

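/*
 * Illustrative sketch, not code from this file: the secondary side of
 * the kick above is an early spin loop which, written as C, would look
 * roughly like
 *
 *      while (!paca[cpu].cpu_start)
 *              barrier();
 *
 * before the CPU falls through into its secondary entry path.
 */
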
void smp_message_recv(int msg)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* we notice need_resched on exit */
                break;
        case PPC_MSG_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(get_irq_regs());
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        /* we just need the return path side effect of checking need_resched */
        return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK) {
                return 1;
        }
#endif
        err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
             virq, smp_ipi_name[msg], err);

        return err;
}

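/*
 * Hypothetical usage sketch (not from this file): a platform whose
 * interrupt controller provides a distinct hardware IPI per message
 * type might register all four at probe time, e.g.
 *
 *      for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *              smp_request_message_ipi(ipi_virq[msg], msg);
 *
 * where ipi_virq[] is an assumed, platform-provided table of virqs
 * mapped beforehand.
 */
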
void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask)
                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

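/*
 * The two arch_send_* hooks above are the architecture half of the
 * generic cross-call machinery in kernel/smp.c: generic code queues the
 * call data, then invokes these to raise the actual IPIs.
 */
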
#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback && smp_ops) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

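/*
 * Note that the final 0 passed to smp_call_function() above is the
 * "wait" flag: we must not wait for acknowledgement here, because
 * stop_this_cpu() never returns on the target CPUs.
 */
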
struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
        paca[cpu].kstack = (unsigned long) task_thread_info(p)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        if (smp_ops)
                max_cpus = smp_ops->probe();
        else
                max_cpus = 1;

        smp_space_timers(max_cpus);

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
        cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
        cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
#endif
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

#ifdef CONFIG_PPC64
        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
#endif
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif

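/*
 * generic_cpu_enable(), generic_cpu_die() and generic_mach_cpu_die()
 * form a handshake over the per-cpu cpu_state variable: the dying CPU
 * parks itself in CPU_DEAD and spins until a waker flips the state back
 * to CPU_UP_PREPARE, at which point it rejoins the online map.
 */
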
static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (can be left over from a
         * CPU hotplug)
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--)
                        msleep(200);
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

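/*
 * __cpu_up() is the architecture hook called from the generic cpu_up()
 * path in kernel/cpu.c; by the time it returns 0, the new CPU has added
 * itself to cpu_online_map from start_secondary() below.
 */
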
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const int *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = *reg;
out:
        of_node_put(np);
        return id;
}

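/*
 * Illustrative device-tree fragment (hypothetical, for exposition):
 * the "reg" property read above lives in the cpu node, e.g.
 *
 *      cpus {
 *              cpu@0 {
 *                      device_type = "cpu";
 *                      reg = <0x0>;
 *              };
 *      };
 */
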
/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

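/*
 * of_find_next_cache_node() follows the cpu node's cache linkage (its
 * "l2-cache" or "next-level-cache" phandle, depending on the firmware),
 * so two cpus end up sharing a cpu_core_map entry below exactly when
 * their nodes point at the same L2 cache node.
 */
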
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        if (system_state > SYSTEM_BOOTING)
                snapshot_timebase();

        secondary_cpu_time_init();

        ipi_call_lock();
        notify_cpu_starting(cpu);
        cpu_set(cpu, cpu_online_map);
        /* Update sibling maps */
        base = cpu_first_thread_in_core(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
                cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
                cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpu_set(cpu, per_cpu(cpu_core_map, base + i));
                cpu_set(base + i, per_cpu(cpu_core_map, cpu));
        }
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
        ipi_call_unlock();

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        if (smp_ops)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);

        snapshot_timebases();

        dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        struct device_node *l2_cache;
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_in_core(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
                cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
                cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
                cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
        }

        l2_cache = cpu_to_l2cache(cpu);
        for_each_present_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpu_clear(cpu, per_cpu(cpu_core_map, i));
                        cpu_clear(i, per_cpu(cpu_core_map, cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif