/* arch/powerpc/kernel/smp.c */
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
/*
 * Store all idle threads; they can be reused instead of creating
 * a new thread. This also avoids complicated thread destroy
 * functionality for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))

/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional function to request IPIs, for controllers with >= 4 IPIs */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}
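
/*
 * Each message type owns one byte of info->messages: a sender only
 * ever stores 1 into its own byte, so concurrent senders of different
 * messages don't need a lock, and the receiver can drain every
 * pending message with a single exchange of the whole word.
 */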
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	message[msg] = 1;
	mb();
	smp_ops->cause_ipi(cpu, info->data);
}
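
/*
 * On a big-endian machine, byte msg of the 32-bit word occupies bits
 * (31 - 8 * msg) down to (24 - 8 * msg), so testing bit (24 - 8 * msg)
 * below is equivalent to checking message[msg] == 1.
 */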
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg_local(&info->messages, 0);

#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
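
/*
 * Controllers with a native multi-message IPI supply ->message_pass;
 * otherwise we fall back to the muxed single-IPI path above.
 */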
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops)
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	else
		max_cpus = 1;
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}
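
/*
 * Poll for the dying CPU to mark itself CPU_DEAD: 100 iterations of
 * msleep(100) gives it roughly ten seconds before we complain.
 */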
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
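
/*
 * The dying CPU parks here with interrupts off. It is released when
 * cpu_state is flipped back to CPU_UP_PREPARE, which is what
 * smp_generic_kick_cpu() does on a soft re-plug.
 */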
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
#endif
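
/*
 * Arguments for fork_idle(), bundled so that it can be run from a
 * workqueue; see the comment in create_idle() for why.
 */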
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit create_idle(unsigned int cpu)
{
	struct thread_info *ti;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	/* We can't use kernel_thread since we must avoid rescheduling
	 * the child. We use a workqueue because we want to fork from
	 * a kernel thread, not whatever userspace process happens to
	 * be trying to online us.
	 */
	if (!c_idle.idle) {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	} else
		init_idle(c_idle.idle, cpu);
	if (IS_ERR(c_idle.idle)) {
		pr_err("Failed fork for CPU %u: %li\n", cpu, PTR_ERR(c_idle.idle));
		return PTR_ERR(c_idle.idle);
	}
	ti = task_thread_info(c_idle.idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = c_idle.idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	current_set[cpu] = ti;

	return 0;
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	int rc, c;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure we have an idle thread */
	rc = create_idle(cpu);
	if (rc)
		return rc;

	secondary_ti = current_set[cpu];

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}
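
	/* At boot the loop below is 50000 * udelay(100), i.e. about five seconds */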
	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
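
/*
 * Example for the two helpers above: with threads_per_core = 4
 * (threads_shift = 2), logical CPUs 4..7 are the threads of core 1,
 * so cpu_core_index_of_thread(5) == 1 and
 * cpu_first_thread_of_core(1) == 4.
 */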
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
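
	/*
	 * CPUs that share an L2 cache are also put in each other's
	 * core mask, so the scheduler treats them as part of one core.
	 */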
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif