/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

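/*
 * Return the page directory in the form the secondary startup code
 * expects: a PFN when LPAE is enabled (the physical address may not fit
 * in an unsigned long), otherwise the physical address itself.
 */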
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

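/*
 * These helpers report what the registered smp_ops can do: secondary
 * boot needs an smp_boot_secondary hook, and CPU hotplug needs a
 * cpu_kill hook (with CONFIG_HOTPLUG_CPU enabled).
 */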
int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time.  A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

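/*
 * __smp_cross_call is the platform/irqchip hook used to raise an IPI
 * (typically an SGI on GIC-based systems) on a set of CPUs; only the
 * first registration via set_smp_cross_call() takes effect.
 */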
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

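/* Total number of IPIs handled by a CPU, summed across all IPI types. */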
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

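/*
 * Per-CPU completion used for IPI-driven handshakes: a caller registers a
 * completion for a target CPU with register_ipi_completion(), raises the
 * returned IPI_COMPLETION on that CPU, and the IPI handler completes it.
 */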
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

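/*
 * Stop all other CPUs: send IPI_CPU_STOP to every online CPU except this
 * one, then busy-wait up to one second for them to go offline.
 */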
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

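/*
 * Rescale loops_per_jiffy (global and per-CPU) around cpufreq transitions
 * so udelay() stays roughly calibrated: the first notification records
 * reference values, later ones scale them to the new frequency.
 */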
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}