arch/arm/mach-tegra/headsmp.S
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/cache.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>

#include "flowctrl.h"
#include "iomap.h"
#include "reset.h"
#include "sleep.h"

#define APB_MISC_GP_HIDREV	0x804
#define PMC_SCRATCH41		0x140

#define RESET_DATA(x)		((TEGRA_RESET_##x)*4)

	.section ".text.head", "ax"
	__CPUINIT

/*
 * Tegra specific entry point for secondary CPUs.
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor. We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 */
ENTRY(v7_invalidate_l1)
	mov	r0, #0
	mcr	p15, 2, r0, c0, c0, 0	@ select L1 data cache in CSSELR
	mrc	p15, 1, r0, c0, c0, 0	@ read CCSIDR (cache geometry)

	ldr	r1, =0x7fff
	and	r2, r1, r0, lsr #13

	ldr	r1, =0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	mcr	p15, 0, r5, c7, c6, 2	@ DCISW: invalidate D-cache line by set/way
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_invalidate_l1)
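The loop above walks the L1 data cache by set/way using the geometry read from CCSIDR. Purely as a readability aid, here is a hedged C sketch of the same arithmetic; dcisw() is a hypothetical stand-in for the "mcr p15, 0, Rt, c7, c6, 2" operation and is not a real kernel helper.

/*
 * Illustrative C sketch of the set/way arithmetic in v7_invalidate_l1.
 * ARMv7 CCSIDR field layout: LineSize in bits [2:0], Associativity-1
 * in bits [12:3], NumSets-1 in bits [27:13].
 */
static void dcisw(unsigned int setway)
{
	/* Stand-in for "mcr p15, 0, setway, c7, c6, 2" (DCISW). */
	(void)setway;
}

static void v7_invalidate_l1_sketch(unsigned int ccsidr)
{
	unsigned int num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;
	unsigned int num_ways  = ((ccsidr >> 3) & 0x3ff) + 1;
	unsigned int set_shift = (ccsidr & 0x7) + 4;		/* log2(line size in bytes) */
	unsigned int way_shift = __builtin_clz(num_ways - 1);	/* ways occupy the top bits;
								   assumes num_ways > 1, as on Cortex-A9 */

	for (unsigned int set = num_sets; set-- > 0; )
		for (unsigned int way = num_ways; way-- > 0; )
			dcisw((way << way_shift) | (set << set_shift));
}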

ENTRY(tegra_secondary_startup)
	bl	v7_invalidate_l1
	/* Enable coresight */
	mov32	r0, 0xC5ACCE55
	mcr	p14, 0, r0, c7, c12, 6
	b	secondary_startup
ENDPROC(tegra_secondary_startup)

#ifdef CONFIG_PM_SLEEP
/*
 *	tegra_resume
 *
 *	  CPU boot vector when restarting a CPU following
 *	  an LP2 transition. Also branched to by LP0 and LP1 resume after
 *	  re-enabling SDRAM.
 */
ENTRY(tegra_resume)
	bl	v7_invalidate_l1
	/* Enable coresight */
	mov32	r0, 0xC5ACCE55
	mcr	p14, 0, r0, c7, c12, 6

	cpu_id	r0
	cmp	r0, #0				@ CPU0?
	bne	cpu_resume			@ no

#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	/* Are we on Tegra20? */
	mov32	r6, TEGRA_APB_MISC_BASE
	ldr	r0, [r6, #APB_MISC_GP_HIDREV]
	and	r0, r0, #0xff00
	cmp	r0, #(0x20 << 8)
	beq	1f				@ Yes
	/* Clear the flow controller flags for this CPU. */
	mov32	r2, TEGRA_FLOW_CTRL_BASE + FLOW_CTRL_CPU0_CSR	@ CPU0 CSR
	ldr	r1, [r2]
	/* Clear event & intr flag */
	orr	r1, r1, \
		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	movw	r0, #0x0FFD	@ enable, cluster_switch, immed, & bitmaps
	bic	r1, r1, r0
	str	r1, [r2]
1:
#endif

#ifdef CONFIG_HAVE_ARM_SCU
	/* enable SCU */
	mov32	r0, TEGRA_ARM_PERIF_BASE
	ldr	r1, [r0]
	orr	r1, r1, #1
	str	r1, [r0]
#endif

	/* L2 cache resume & re-enable */
	l2_cache_resume	r0, r1, r2, l2x0_saved_regs_addr

	b	cpu_resume
ENDPROC(tegra_resume)
#endif
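tegra_resume (and the reset handler below) identifies the SoC by reading the chip-ID field of APB_MISC_GP_HIDREV. As a hedged, illustrative C equivalent of that test, assuming only what the assembly implies, namely that the chip ID sits in bits [15:8] of the register and that 0x20 means Tegra20:

#include <stdbool.h>
#include <stdint.h>

#define APB_MISC_GP_HIDREV	0x804	/* same offset as the #define at the top of this file */

/* Illustrative only: apb_misc is assumed to be the mapped TEGRA_APB_MISC_BASE. */
static bool is_tegra20(const volatile uint32_t *apb_misc)
{
	uint32_t hidrev = apb_misc[APB_MISC_GP_HIDREV / 4];

	return (hidrev & 0xff00) == (0x20 << 8);	/* chip ID 0x20 => Tegra20 */
}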

#ifdef CONFIG_CACHE_L2X0
	.globl	l2x0_saved_regs_addr
l2x0_saved_regs_addr:
	.long	0
#endif

	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_start)

/*
 * __tegra_cpu_reset_handler:
 *
 * Common handler for all CPU reset events.
 *
 * Register usage within the reset handler:
 *
 *      R7  = CPU present (to the OS) mask
 *      R8  = CPU in LP1 state mask
 *      R9  = CPU in LP2 state mask
 *      R10 = CPU number
 *      R11 = CPU mask
 *      R12 = pointer to reset handler data
 *
 * NOTE: This code is copied to IRAM. All code and data accesses
 *       must be position-independent.
 */

	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler)

	cpsid	aif, 0x13			@ SVC mode, interrupts disabled
	mrc	p15, 0, r10, c0, c0, 5		@ MPIDR
	and	r10, r10, #0x3			@ R10 = CPU number
	mov	r11, #1
	mov	r11, r11, lsl r10		@ R11 = CPU mask
	adr	r12, __tegra_cpu_reset_handler_data

#ifdef CONFIG_SMP
	/* Does the OS know about this CPU? */
	ldr	r7, [r12, #RESET_DATA(MASK_PRESENT)]
	tst	r7, r11				@ if !present
	bleq	__die				@ CPU not present (to OS)
#endif

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	/* Are we on Tegra20? */
	mov32	r6, TEGRA_APB_MISC_BASE
	ldr	r0, [r6, #APB_MISC_GP_HIDREV]
	and	r0, r0, #0xff00
	cmp	r0, #(0x20 << 8)
	bne	1f
	/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
	mov32	r6, TEGRA_PMC_BASE
	mov	r0, #0
	cmp	r10, #0
	strne	r0, [r6, #PMC_SCRATCH41]
1:
#endif

	/* Waking up from LP2? */
	ldr	r9, [r12, #RESET_DATA(MASK_LP2)]
	tst	r9, r11				@ if in_lp2
	beq	__is_not_lp2
	ldr	lr, [r12, #RESET_DATA(STARTUP_LP2)]
	cmp	lr, #0
	bleq	__die				@ no LP2 startup handler
	bx	lr

__is_not_lp2:

#ifdef CONFIG_SMP
	/*
	 * Can only be secondary boot (initial or hotplug) but CPU 0
	 * cannot be here.
	 */
	cmp	r10, #0
	bleq	__die				@ CPU0 cannot be here
	ldr	lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
	cmp	lr, #0
	bleq	__die				@ no secondary startup handler
	bx	lr
#endif

/*
 * We don't know why the CPU reset. Just kill it.
 * The LR register will contain the address we died at + 4.
 */

__die:
	sub	lr, lr, #4
	mov32	r7, TEGRA_PMC_BASE
	str	lr, [r7, #PMC_SCRATCH41]

	mov32	r7, TEGRA_CLK_RESET_BASE

	/* Are we on Tegra20? */
	mov32	r6, TEGRA_APB_MISC_BASE
	ldr	r0, [r6, #APB_MISC_GP_HIDREV]
	and	r0, r0, #0xff00
	cmp	r0, #(0x20 << 8)
	bne	1f

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	mov32	r0, 0x1111
	mov	r1, r0, lsl r10
	str	r1, [r7, #0x340]		@ CLK_RST_CPU_CMPLX_SET
#endif
1:
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	mov32	r6, TEGRA_FLOW_CTRL_BASE

	cmp	r10, #0
	moveq	r1, #FLOW_CTRL_HALT_CPU0_EVENTS
	moveq	r2, #FLOW_CTRL_CPU0_CSR
	movne	r1, r10, lsl #3
	addne	r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
	addne	r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)

	/* Clear CPU "event" and "interrupt" flags and power gate
	   it when halting but not before it is in the "WFI" state. */
	ldr	r0, [r6, +r2]
	orr	r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	orr	r0, r0, #FLOW_CTRL_CSR_ENABLE
	str	r0, [r6, +r2]

	/* Unconditionally halt this CPU */
	mov	r0, #FLOW_CTRL_WAITEVENT
	str	r0, [r6, +r1]
	ldr	r0, [r6, +r1]			@ memory barrier

	dsb
	isb
	wfi					@ CPU should be power gated here

	/* If the CPU didn't power gate above, just kill its clock. */

	mov	r0, r11, lsl #8
	str	r0, [r7, #0x348]		@ CLK_CPU_CMPLX_SET
#endif

	/* If the CPU still isn't dead, just spin here. */
	b	.
ENDPROC(__tegra_cpu_reset_handler)
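The Tegra30 branch of __die selects a per-CPU flow-controller HALT_EVENTS/CSR pair: CPU0 has dedicated registers, and CPUs 1..n follow at an 8-byte stride starting at the CPU1 registers. A hedged C sketch of that offset selection; the concrete offsets are assumptions taken from the usual flowctrl.h layout, not restated from this file:

#define FLOW_CTRL_HALT_CPU0_EVENTS	0x00	/* assumed from flowctrl.h */
#define FLOW_CTRL_CPU0_CSR		0x08	/* assumed from flowctrl.h */
#define FLOW_CTRL_HALT_CPU1_EVENTS	0x14	/* assumed from flowctrl.h */
#define FLOW_CTRL_CPU1_CSR		0x18	/* assumed from flowctrl.h */

static unsigned int flowctrl_halt_offset(unsigned int cpu)
{
	/* Mirrors: movne r1, r10, lsl #3 ; addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8) */
	return cpu ? FLOW_CTRL_HALT_CPU1_EVENTS + (cpu - 1) * 8
		   : FLOW_CTRL_HALT_CPU0_EVENTS;
}

static unsigned int flowctrl_csr_offset(unsigned int cpu)
{
	/* Mirrors: movne r1, r10, lsl #3 ; addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8) */
	return cpu ? FLOW_CTRL_CPU1_CSR + (cpu - 1) * 8
		   : FLOW_CTRL_CPU0_CSR;
}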

	.align L1_CACHE_SHIFT
	.type	__tegra_cpu_reset_handler_data, %object
	.globl	__tegra_cpu_reset_handler_data
__tegra_cpu_reset_handler_data:
	.rept	TEGRA_RESET_DATA_SIZE
	.long	0
	.endr
	.align L1_CACHE_SHIFT

ENTRY(__tegra_cpu_reset_handler_end)
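For reference, a hedged C sketch of how the data block above is laid out and consumed. The slot order mirrors the TEGRA_RESET_* indices from mach-tegra's reset.h and is an assumption here; each slot is the 32-bit word that RESET_DATA(x) addresses. jump_to() and die() are hypothetical stand-ins for the "bx lr" dispatch and the __die path in the handler.

#include <stdint.h>

enum tegra_reset_data_slot {		/* order assumed from reset.h */
	TEGRA_RESET_MASK_PRESENT,	/* CPUs the OS knows about */
	TEGRA_RESET_MASK_LP1,		/* CPUs currently in LP1 */
	TEGRA_RESET_MASK_LP2,		/* CPUs currently in LP2 */
	TEGRA_RESET_STARTUP_SECONDARY,	/* secondary-boot entry point */
	TEGRA_RESET_STARTUP_LP2,	/* LP2-resume entry point */
	TEGRA_RESET_STARTUP_LP1,	/* LP1-resume entry point */
	TEGRA_RESET_DATA_SIZE
};

/* Mirrors the ".rept TEGRA_RESET_DATA_SIZE / .long 0" block; the C side
 * fills it in before the handler (code + data) is copied to IRAM, which
 * is why the assembly reaches it PC-relatively via "adr r12, ...". */
static uint32_t reset_data[TEGRA_RESET_DATA_SIZE];

static void jump_to(uint32_t entry) { (void)entry; }	/* stand-in for "bx lr" */
static void die(void) { for (;;) ; }			/* stand-in for __die */

/* Decision sequence the handler follows for the CPU given by MPIDR. */
static void reset_dispatch_sketch(unsigned int cpu)
{
	uint32_t mask = 1u << cpu;

	if (!(reset_data[TEGRA_RESET_MASK_PRESENT] & mask))
		die();					/* CPU not present (to OS) */

	if (reset_data[TEGRA_RESET_MASK_LP2] & mask) {
		if (!reset_data[TEGRA_RESET_STARTUP_LP2])
			die();				/* no LP2 startup handler */
		jump_to(reset_data[TEGRA_RESET_STARTUP_LP2]);
		return;
	}

	if (cpu == 0 || !reset_data[TEGRA_RESET_STARTUP_SECONDARY])
		die();					/* CPU0, or no secondary startup handler */
	jump_to(reset_data[TEGRA_RESET_STARTUP_SECONDARY]);
}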