/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
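
/*
 * For reference: the .stack and .pgdir slots poked in __cpu_up() below
 * are consumed by the secondary boot path in head.S.  A minimal sketch
 * of the structure, as declared in asm/smp.h:
 *
 *	struct secondary_data {
 *		unsigned long pgdir;	// physical address of page tables
 *		void *stack;		// SVC-mode stack for the new core
 *	};
 */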

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
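
/*
 * Illustrative note (not in the original file): each ipi_msg_type is a
 * bit index into the per-CPU ipi_data.bits word.  Posting a message is
 * "bits |= 1 << msg" under the lock, so repeated posts of the same
 * message before delivery collapse into a single pending bit, e.g.
 *
 *	bits = (1 << IPI_TIMER) | (1 << IPI_CPU_STOP) = 0x11
 *
 * do_IPI() below snapshots and clears the word, then peels off one
 * message at a time.
 */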

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
        flush_pmd_entry(pmd);
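
        /*
         * The entry written above maps the virtual address PHYS_OFFSET to
         * the identical physical address as a writable section, so the
         * secondary core can turn the MMU on while it is still executing
         * from the kernel's physical location.
         */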

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        wmb();          /* make secondary_data visible before waking the core */

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd = __pmd(0);
        clean_pmd_entry(pmd);
        pgd_free(&init_mm, pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}
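
/*
 * __cpu_up() is called from the generic hotplug path (cpu_up() in
 * kernel/cpu.c), which has already checked that this cpu is possible
 * and not yet online before we get here.
 */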

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = mach_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        cpu_clear(cpu, cpu_online_map);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpu_clear(cpu, p->mm->cpu_vm_mask);
        }
        read_unlock(&tasklist_lock);

        return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
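
/*
 * THREAD_SIZE - 8 above matches THREAD_START_SP, the initial stack
 * pointer handed to the core in __cpu_up(), so re-entering
 * secondary_start_kernel() starts from a pristine idle-thread stack.
 */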
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpu_set(cpu, mm->cpu_vm_mask);
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup local timer for this CPU.
         */
        local_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        cpu_set(cpu, cpu_online_map);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu_mask(cpu, callmap) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(callmap);

        local_irq_restore(flags);
}
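
/*
 * Interrupts are disabled across the whole sequence above so that a
 * local do_IPI() cannot fire while this CPU holds its own ipi->lock
 * (the mask may include the sender), which would deadlock.
 */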

void arch_send_call_function_ipi(cpumask_t mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_printf(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}

static void ipi_timer(void)
{
        irq_enter();
        local_timer_interrupt();
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        cpu_clear(cpu, cpu_online_map);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

                        nextmsg = msgs & -msgs;         /* isolate lowest set bit */
                        msgs &= ~nextmsg;               /* and consume it */
                        nextmsg = ffz(~nextmsg);        /* bit -> message number */

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}
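
/*
 * Illustrative sketch (not in the original file) of the bit tricks in
 * the dispatch loop above: "msgs & -msgs" isolates the lowest set bit,
 * and "ffz(~bit)" turns that bit back into its index, i.e. the
 * ipi_msg_type value.
 */
#if 0   /* example only */
static void example_decode(unsigned long msgs)
{
        while (msgs) {
                unsigned long bit = msgs & -msgs;       /* lowest set bit */

                msgs &= ~bit;                           /* consume it */
                printk("pending IPI message %d\n", (int)ffz(~bit));
        }
}
#endif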

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_TIMER);
}

void smp_timer_broadcast(cpumask_t mask)
{
        send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
{
        int ret = 0;

        preempt_disable();

        ret = smp_call_function_mask(mask, func, info, wait);
        if (cpu_isset(smp_processor_id(), mask))
                func(info);

        preempt_enable();

        return ret;
}
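
/*
 * smp_call_function_mask() only IPIs *other* CPUs, so when the calling
 * CPU is itself in the mask the wrapper above must run func() locally.
 * preempt_disable() keeps smp_processor_id() stable across the check.
 */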

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};
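
/*
 * The cross-CPU helpers below each receive a single void * argument,
 * so vma/start/end are bundled into a tlb_args on the caller's stack;
 * the wait=1 in every on_each_cpu*() call keeps that stack frame live
 * until all CPUs have finished with it.
 */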

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t mask = mm->cpu_vm_mask;

        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = uaddr;

        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        struct tlb_args ta;

        ta.ta_start = kaddr;

        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct tlb_args ta;

        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}