ARM: 7291/1: cache: assume 64-byte L1 cachelines for ARMv7 CPUs
[deliverable/linux.git] / arch / arm / mach-vexpress / hotplug.c
CommitLineData
e9882777
RK
1/*
 2 * linux/arch/arm/mach-vexpress/hotplug.c
3 *
4 * Copyright (C) 2002 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/smp.h>
14
15#include <asm/cacheflush.h>
b3377d18 16#include <asm/system.h>
e9882777
RK
17
18extern volatile int pen_release;
19
/*
 * Take this CPU out of the SMP coherency domain in preparation for
 * being powered down / parked in WFI, so stale cachelines cannot be
 * migrated to or from it once it stops participating in snooping.
 *
 * Caller must have IRQs disabled.
 */
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	/* Clean + invalidate all cache levels before leaving coherency */
	flush_cache_all();
	asm volatile(
		/* Invalidate entire I-cache (%1 is the constant 0) */
		"mcr	p15, 0, %1, c7, c5, 0\n"
		/* Data synchronization barrier (CP15 c7,c10,4 encoding) */
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency: clear the SMP bit (%3 = 0x40) in the
	 * Auxiliary Control Register so this core stops snooping...
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	/* ...then disable the D-cache: clear the C bit (%2 = CR_C) in SCTLR */
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
41
/*
 * Undo cpu_enter_lowpower(): re-enable the D-cache (set the C bit,
 * %1 = CR_C, in SCTLR), then rejoin the SMP coherency domain (set the
 * SMP bit, %2 = 0x40, in the Auxiliary Control Register). Note this is
 * the reverse order of cpu_enter_lowpower().
 */
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
		"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
57
/*
 * Park the dying CPU in WFI until another CPU writes this core's ID
 * into pen_release, signalling a genuine request to bring it back up.
 *
 * @cpu:      logical CPU number of this (dying) CPU
 * @spurious: incremented once per wakeup from WFI that was not a
 *            proper release, so the caller can report it afterwards
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;) {
		wfi();

		/*
		 * pen_release holds a cpu_logical_map() (hardware) ID,
		 * so translate our logical number before comparing.
		 */
		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
85
/*
 * Confirm that @cpu has been taken down. There is no per-core power
 * control on this platform — the dying CPU has already parked itself
 * in WFI via platform_cpu_die() — so always report success.
 */
int platform_cpu_kill(unsigned int cpu)
{
	return 1;
}
90
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/* Drop out of coherency and park this core in WFI until it is
	 * properly released again. */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	/* A valid wakeup arrived: rejoin the coherency domain before
	 * interrupts are restored by the caller. */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
115
/*
 * Decide whether @cpu may be hot-unplugged.
 *
 * Returns 0 if the CPU can be taken offline, -EPERM otherwise.
 */
int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shutdown (it is still too special
	 * e.g. clock tick interrupts)
	 */
	if (cpu == 0)
		return -EPERM;

	return 0;
}
This page took 0.090199 seconds and 5 git commands to generate.