/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/paca.h>
#include <asm/vdso_datapage.h>

#define DBG(fmt...) udbg_printf(fmt)

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpus_weight(cpu_possible_map);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */

void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start().
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}

void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(regs);
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(regs);
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
	}
}

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT	8

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	u64 timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for response */
	while (atomic_read(&data.started) != cpus) {
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			goto out;
		}
	}

	if (wait) {
		while (atomic_read(&data.finished) != cpus) {
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				goto out;
			}
		}
	}

	ret = 0;

 out:
	call_data = NULL;
	spin_unlock(&call_lock);
	return ret;
}

EXPORT_SYMBOL(smp_call_function);

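/*
 * Illustrative usage sketch for smp_call_function() as documented above.
 * The flush_local_state() helper is hypothetical and only stands in for
 * whatever fast, non-blocking per-cpu work a caller wants to run:
 *
 *	static void flush_local_state(void *unused)
 *	{
 *		...fast, non-blocking work on the receiving cpu...
 *	}
 *
 *	...from process context, with interrupts enabled...
 *	smp_call_function(flush_local_state, NULL, 0, 1);
 *
 * With wait == 1 the call does not return until every other online cpu
 * has finished running the function.
 */
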
void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	paca[cpu].__current = p;
	current_set[cpu] = p->thread_info;
	p->thread_info->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	max_cpus = smp_ops->probe();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);
	paca[boot_cpuid].__current = current;
	current_set[boot_cpuid] = current->thread_info;
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	cpu_clear(cpu, cpu_online_map);
	vdso_data->processorCount--;
	fixup_irqs(cpu_online_map);
	return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();

	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

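/*
 * Illustrative sketch of the hotplug handshake built on the cpu_state
 * per-cpu variable above; the myplat_cpu_die() hook name is hypothetical:
 *
 *	static void myplat_cpu_die(void)
 *	{
 *		generic_mach_cpu_die();
 *	}
 *
 * The dying cpu marks itself CPU_DEAD and spins until cpu_state becomes
 * CPU_UP_PREPARE again.  A surviving cpu calls generic_cpu_die(cpu) to wait
 * for the CPU_DEAD mark, and generic_cpu_enable(cpu) later writes
 * CPU_UP_PREPARE, releasing the spinning cpu so it can put itself back in
 * cpu_online_map.
 */
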
static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}

int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
		return -EINVAL;

	/* Make sure callin-map entry is 0 (can be leftover from a CPU
	 * hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--)
			msleep(200);
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin ourselves to CPU 0 for a short while
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	if (smp_ops->cpu_disable)
		return smp_ops->cpu_disable();

	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */

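/*
 * Illustrative sketch of how a platform might populate smp_ops with the
 * hooks used in this file; myplat_message_pass() is hypothetical, the
 * other callbacks are the generic/MPIC helpers defined above:
 *
 *	static struct smp_ops_t myplat_smp_ops = {
 *		.message_pass	= myplat_message_pass,
 *		.probe		= smp_mpic_probe,
 *		.kick_cpu	= smp_generic_kick_cpu,
 *		.setup_cpu	= smp_mpic_setup_cpu,
 *	};
 *
 * The platform points smp_ops at such a structure during early setup so
 * that smp_prepare_cpus() and __cpu_up() can drive cpu bringup through it.
 */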