/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 | ||
12 | #include <linux/linkage.h> | |
6ebbf2ce | 13 | #include <asm/assembler.h> |
b2b9762f SS |
14 | #include <asm/smp_scu.h> |
15 | #include <asm/memory.h> | |
16 | #include <asm/hardware/cache-l2x0.h> | |
17 | ||
c1db9d73 | 18 | #include "omap-secure.h" |
b2b9762f SS |
19 | |
20 | #include "common.h" | |
c49f34bc | 21 | #include "omap44xx.h" |
b2b9762f SS |
22 | #include "omap4-sar-layout.h" |
23 | ||
24 | #if defined(CONFIG_SMP) && defined(CONFIG_PM) | |
25 | ||
/*
 * DO_SMC: issue a Secure Monitor Call.
 * The service index is expected in r12 and arguments in r0-r3
 * (set up by each call site).  The smc is bracketed by dsb so all
 * outstanding memory accesses complete before entering the monitor
 * and before executing past it on return.
 */
.macro DO_SMC
	dsb
	smc	#0
	dsb
.endm
31 | ||
b46355a9 NM |
32 | #ifdef CONFIG_ARCH_OMAP4 |
33 | ||
b2b9762f SS |
/*
 * =============================
 * == CPU suspend finisher    ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function code saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domains states to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
 * from this follows a full CPU reset path via ROM code to CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for CPU INACTIVE and ON power states or in case
 * CPU failed to transition to targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * stack frame and it expects the caller to take care of it. Hence the entire
 * stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI			@ No lowpower state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS (high-security) device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF		@ clean secure L1
	/*
	 * Save the working registers around the monitor call; the
	 * secure side is not guaranteed to preserve them.
	 */
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 data cache. Even though only invalidate is
	 * necessary exported flush API is used here. Doing clean
	 * on already clean cache would be almost NOP.
	 */
	bl	v7_flush_dcache_all

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ands	r0, r0, #0x0f		@ CPU id selects the per-CPU SAR slot
	ldreq	r0, [r8, #SCU_OFFSET0]	@ CPU0: target SCU power state
	ldrne	r0, [r8, #SCU_OFFSET1]	@ CPU1: target SCU power state
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}	@ preserve regs across the monitor call
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	/* GP device: program the SCU power mode directly. */
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
	/*
	 * If the non-secure world may write ACTLR (NSACR bit 18),
	 * clear the ACTLR.SMP bit to leave coherency.
	 */
	mrc	p15, 0, r0, c1, c1, 2	@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)	@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * Common cache-l2x0.c functions can't be used here since it
	 * uses spinlocks. We are out of coherency here with data cache
	 * disabled. The spinlock implementation uses exclusive load/store
	 * instruction which can fail without data cache being enabled.
	 * OMAP4 hardware doesn't support exclusive monitor which can
	 * overcome exclusive access issue. Because of this, CPU can
	 * lead to deadlock.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5	@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
	cmp	r0, #3			@ L2 maintenance only for MPUSS OFF (3)
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
	/* Errata 727915: set the PL310 debug control bits around the op. */
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff		@ way mask: operate on all 16 ways
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	/* Poll until the controller clears all way bits (op complete). */
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00		@ restore PL310 debug control
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	/* Drain the PL310 buffers via a cache-sync and poll completion. */
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * CPU is here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in
	 * SCU power state so that CPU is back in coherency.
	 * In non-coherent mode CPU can lock-up and lead to
	 * system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}	@ preserve regs across the monitor call
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
232 | ||
/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. Physical address of the function is
 * stored in the SAR RAM while entering to OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable	@ CPU0: already enabled by PPA init
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	/*
	 * Position-independent load of the ppa_zero_params address:
	 * the literal holds (ppa_zero_params - .), so adding the
	 * literal's own address yields the absolute pointer.
	 */
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1		@ Pointer to ppa_zero_params
	mov	r1, #0x0		@ Process ID
	mov	r2, #0x4		@ Flag
	mov	r6, #0xff
	mov	r12, #0x00		@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0		@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry		@ retry until the PPA call succeeds
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
	 * register r0 contains value to be programmed.
	 * L2 cache is already invalidated by ROM code as part
	 * of MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en		@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1		@ Check for HS device
	bne	set_gp_por
	/* HS device: restore L2 prefetch/POR through the PPA service. */
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1		@ Pointer to ppa_por_params
	str	r4, [r3, #0x04]		@ second word = saved prefetch ctrl
	mov	r1, #0x0		@ Process ID
	mov	r2, #0x4		@ Flag
	mov	r6, #0xff
	mov	r12, #0x00		@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	/* GP device: restore L2 prefetch ctrl via the monitor directly. */
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume		@ Jump to generic resume
ppa_por_params_offset:
	/* PC-relative offset to ppa_por_params (which lives in .data). */
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
b46355a9 NM |
332 | #endif /* CONFIG_ARCH_OMAP4 */ |
333 | ||
334 | #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ | |
b2b9762f SS |
335 | |
/*
 * void omap_do_wfi(void)
 *
 * Drain outstanding writes, execute the required barriers and enter
 * WFI.  Returns to the caller if/when the CPU wakes without having
 * been reset (i.e. the targeted low-power state was not reached).
 */
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Sixteen
	 * NOPs as per Cortex-A9 pipeline.
	 */
	wfi				@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	/*
	 * PC-relative offset to ppa_zero_params, used by
	 * omap4_cpu_resume via adr/ldr/add.  Placed here (after the
	 * unconditional return, so never executed) — presumably to stay
	 * within adr range of its user; TODO confirm.
	 */
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)
4da597d1 TL |
386 | |
.data
	/* Zeroed argument block passed to the PPA NS_SMP (ACTRL) service. */
ppa_zero_params:
	.word	0

	/*
	 * Argument block for the PPA L2 POR service: first word is a
	 * flag/count (1); second word is filled at runtime with the saved
	 * L2X0 prefetch control value by omap4_cpu_resume.
	 */
ppa_por_params:
	.word	1, 0