/* arch/arm/mach-tegra/headsmp.S */

#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/cache.h>

#include <mach/iomap.h>

#include "flowctrl.h"
#include "reset.h"

#define APB_MISC_GP_HIDREV      0x804
#define PMC_SCRATCH41           0x140
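/*
 * APB_MISC_GP_HIDREV is read below to detect Tegra20; PMC_SCRATCH41 is the
 * PMC scratch register used to record the address at which a dying CPU
 * stopped (and cleared by secondary CPUs on Tegra20).
 */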

#define RESET_DATA(x)           ((TEGRA_RESET_##x)*4)

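/*
 * mov32 builds a 32-bit constant in \reg with a movw/movt pair, avoiding a
 * literal-pool load.
 */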
        .macro  mov32, reg, val
        movw    \reg, #:lower16:\val
        movt    \reg, #:upper16:\val
        .endm

        .section ".text.head", "ax"
        __CPUINIT

/*
 * Tegra specific entry point for secondary CPUs.
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor.  We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 */
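/*
 * The cache geometry (sets, ways, line size) is read from CCSIDR and each
 * line is then invalidated individually with the "invalidate data cache
 * line by set/way" (DCISW) operation.
 */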
ENTRY(v7_invalidate_l1)
        mov     r0, #0
        mcr     p15, 2, r0, c0, c0, 0   @ CSSELR: select level 1 data cache
        mrc     p15, 1, r0, c0, c0, 0   @ CCSIDR: read its geometry

        ldr     r1, =0x7fff
        and     r2, r1, r0, lsr #13     @ NumSets - 1 (CCSIDR[27:13])

        ldr     r1, =0x3ff

        and     r3, r1, r0, lsr #3      @ NumWays - 1 (CCSIDR[12:3])
        add     r2, r2, #1              @ NumSets

        and     r0, r0, #0x7
        add     r0, r0, #4              @ SetShift

        clz     r1, r3                  @ WayShift
        add     r4, r3, #1              @ NumWays
1:      sub     r2, r2, #1              @ NumSets--
        mov     r3, r4                  @ Temp = NumWays
2:      subs    r3, r3, #1              @ Temp--
        mov     r5, r3, lsl r1
        mov     r6, r2, lsl r0
        orr     r5, r5, r6              @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
        mcr     p15, 0, r5, c7, c6, 2   @ DCISW: invalidate this set/way
        bgt     2b
        cmp     r2, #0
        bgt     1b
        dsb
        isb
        mov     pc, lr
ENDPROC(v7_invalidate_l1)

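/*
 * Secondary CPU entry point: invalidate the L1 as described above, write the
 * CoreSight lock-access key to enable debug access, then continue with the
 * common ARM secondary_startup path.
 */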
ENTRY(tegra_secondary_startup)
        bl      v7_invalidate_l1
        /* Enable coresight */
        mov32   r0, 0xC5ACCE55          @ CoreSight lock access key
        mcr     p14, 0, r0, c7, c12, 6
        b       secondary_startup
ENDPROC(tegra_secondary_startup)

        .align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_start)

/*
 * __tegra_cpu_reset_handler:
 *
 * Common handler for all CPU reset events.
 *
 * Register usage within the reset handler:
 *
 *      R7  = CPU present (to the OS) mask
 *      R8  = CPU in LP1 state mask
 *      R9  = CPU in LP2 state mask
 *      R10 = CPU number
 *      R11 = CPU mask
 *      R12 = pointer to reset handler data
 *
 * NOTE: This code is copied to IRAM.  All code and data accesses
 *       must be position-independent.
 */
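/*
 * __tegra_cpu_reset_handler_data (defined at the end of this file) lies
 * between __tegra_cpu_reset_handler_start and __tegra_cpu_reset_handler_end,
 * so it is copied to IRAM together with the code and can be reached with the
 * PC-relative adr below.
 */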

        .align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler)

        cpsid   aif, 0x13               @ SVC mode, interrupts disabled
        mrc     p15, 0, r10, c0, c0, 5  @ MPIDR
        and     r10, r10, #0x3          @ R10 = CPU number
        mov     r11, #1
        mov     r11, r11, lsl r10       @ R11 = CPU mask
        adr     r12, __tegra_cpu_reset_handler_data

#ifdef CONFIG_SMP
        /* Does the OS know about this CPU? */
        ldr     r7, [r12, #RESET_DATA(MASK_PRESENT)]
        tst     r7, r11                 @ if !present
        bleq    __die                   @ CPU not present (to OS)
#endif

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        /* Are we on Tegra20? */
        mov32   r6, TEGRA_APB_MISC_BASE
        ldr     r0, [r6, #APB_MISC_GP_HIDREV]
        and     r0, r0, #0xff00         @ HIDREV chip-ID field
        cmp     r0, #(0x20 << 8)
        bne     1f
        /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
        mov32   r6, TEGRA_PMC_BASE
        mov     r0, #0
        cmp     r10, #0
        strne   r0, [r6, #PMC_SCRATCH41]
1:
#endif

#ifdef CONFIG_SMP
        /*
         * This can only be a secondary boot (initial or hotplug);
         * CPU0 must never get here.
         */
        cmp     r10, #0
        bleq    __die                   @ CPU0 cannot be here
        ldr     lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
        cmp     lr, #0
        bleq    __die                   @ no secondary startup handler
        bx      lr
#endif

/*
 * We don't know why the CPU reset.  Just kill it.
 * The LR register will contain the address we died at + 4.
 */
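/*
 * __die records that address in PMC_SCRATCH41 and then tries to take the
 * CPU offline: on Tegra20 by putting it back into reset, on Tegra30 by
 * halting it through the flow controller (stopping its clock if the power
 * gating does not happen).
 */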

__die:
        sub     lr, lr, #4
        mov32   r7, TEGRA_PMC_BASE
        str     lr, [r7, #PMC_SCRATCH41]

        mov32   r7, TEGRA_CLK_RESET_BASE

        /* Are we on Tegra20? */
        mov32   r6, TEGRA_APB_MISC_BASE
        ldr     r0, [r6, #APB_MISC_GP_HIDREV]
        and     r0, r0, #0xff00
        cmp     r0, #(0x20 << 8)
        bne     1f

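        /*
         * Tegra20: force this CPU back into reset by setting its reset
         * lines in CLK_RST_CPU_CMPLX_SET.
         */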
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        mov32   r0, 0x1111
        mov     r1, r0, lsl r10
        str     r1, [r7, #0x340]        @ CLK_RST_CPU_CMPLX_SET
#endif
1:
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
        mov32   r6, TEGRA_FLOW_CTRL_BASE

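        /*
         * Select this CPU's flow controller registers: CPU0 has dedicated
         * halt-events/CSR registers, while the registers for CPU1 and up
         * are laid out 8 bytes apart.
         */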
        cmp     r10, #0
        moveq   r1, #FLOW_CTRL_HALT_CPU0_EVENTS
        moveq   r2, #FLOW_CTRL_CPU0_CSR
        movne   r1, r10, lsl #3
        addne   r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
        addne   r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)

        /*
         * Clear this CPU's "event" and "interrupt" flags and power gate
         * it when halting, but not before it is in the "WFI" state.
         */
        ldr     r0, [r6, +r2]
        orr     r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
        orr     r0, r0, #FLOW_CTRL_CSR_ENABLE
        str     r0, [r6, +r2]

        /* Unconditionally halt this CPU */
        mov     r0, #FLOW_CTRL_WAITEVENT
        str     r0, [r6, +r1]
        ldr     r0, [r6, +r1]           @ memory barrier

        dsb
        isb
        wfi                             @ CPU should be power gated here

        /* If the CPU didn't power gate above, just kill its clock. */

        mov     r0, r11, lsl #8
        str     r0, [r7, #0x348]        @ CLK_CPU_CMPLX_SET
#endif

        /* If the CPU still isn't dead, just spin here. */
        b       .
ENDPROC(__tegra_cpu_reset_handler)

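/*
 * Reset handler data: one 32-bit word per TEGRA_RESET_* slot, addressed via
 * RESET_DATA() above.
 */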
        .align L1_CACHE_SHIFT
        .type   __tegra_cpu_reset_handler_data, %object
        .globl  __tegra_cpu_reset_handler_data
__tegra_cpu_reset_handler_data:
        .rept   TEGRA_RESET_DATA_SIZE
        .long   0
        .endr
        .align L1_CACHE_SHIFT

ENTRY(__tegra_cpu_reset_handler_end)