/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"

/*
 * ==================== low level suspend ====================
 *
 * Register usage convention in the code below:
 * r0: pm_info structure address;
 * r1 ~ r4: hold pm_info members read out at entry;
 * r5 ~ r10: free registers;
 * r11: io base address.
 *
 * suspend ocram space layout:
 * ======================== high address ======================
 *                              .
 *                              .
 *                              .
 *                              ^
 *                              ^
 *                              ^
 *                      imx6_suspend code
 *              PM_INFO structure(imx6_cpu_pm_info)
 * ======================== low address =======================
 */

/*
 * The offsets below are based on struct imx6_cpu_pm_info,
 * which is defined in arch/arm/mach-imx/pm-imx6q.c.  This
 * structure carries the PM information needed by this low
 * level suspend code; a sketch of its layout follows the
 * offset list below.
 */
#define PM_INFO_PBASE_OFFSET			0x0
#define PM_INFO_RESUME_ADDR_OFFSET		0x4
#define PM_INFO_DDR_TYPE_OFFSET			0x8
#define PM_INFO_PM_INFO_SIZE_OFFSET		0xC
#define PM_INFO_MX6Q_MMDC_P_OFFSET		0x10
#define PM_INFO_MX6Q_MMDC_V_OFFSET		0x14
#define PM_INFO_MX6Q_SRC_P_OFFSET		0x18
#define PM_INFO_MX6Q_SRC_V_OFFSET		0x1C
#define PM_INFO_MX6Q_IOMUXC_P_OFFSET		0x20
#define PM_INFO_MX6Q_IOMUXC_V_OFFSET		0x24
#define PM_INFO_MX6Q_CCM_P_OFFSET		0x28
#define PM_INFO_MX6Q_CCM_V_OFFSET		0x2C
#define PM_INFO_MX6Q_GPC_P_OFFSET		0x30
#define PM_INFO_MX6Q_GPC_V_OFFSET		0x34
#define PM_INFO_MX6Q_L2_P_OFFSET		0x38
#define PM_INFO_MX6Q_L2_V_OFFSET		0x3C
#define PM_INFO_MMDC_IO_NUM_OFFSET		0x40
#define PM_INFO_MMDC_IO_VAL_OFFSET		0x44

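/*
 * For reference, a sketch of the C-side layout that matches the
 * offsets above.  The authoritative definition is struct
 * imx6_cpu_pm_info in arch/arm/mach-imx/pm-imx6q.c; the field
 * names shown here are illustrative only:
 *
 *	struct imx6_pm_base {
 *		phys_addr_t pbase;
 *		void __iomem *vbase;
 *	};
 *
 *	struct imx6_cpu_pm_info {
 *		phys_addr_t pbase;		// 0x00: phys addr of this struct
 *		phys_addr_t resume_addr;	// 0x04: phys resume entry point
 *		u32 ddr_type;			// 0x08
 *		u32 pm_info_size;		// 0x0c
 *		struct imx6_pm_base mmdc;	// 0x10/0x14: phys/virt base pairs
 *		struct imx6_pm_base src;	// 0x18/0x1c
 *		struct imx6_pm_base iomuxc;	// 0x20/0x24
 *		struct imx6_pm_base ccm;	// 0x28/0x2c
 *		struct imx6_pm_base gpc;	// 0x30/0x34
 *		struct imx6_pm_base l2;		// 0x38/0x3c
 *		u32 mmdc_io_num;		// 0x40: number of saved MMDC IO pads
 *		u32 mmdc_io_val[][2];		// 0x44: {IOMUXC offset, value} pairs
 *	};
 */
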
#define MX6Q_SRC_GPR1				0x20
#define MX6Q_SRC_GPR2				0x24
#define MX6Q_MMDC_MAPSR				0x404
#define MX6Q_MMDC_MPDGCTRL0			0x83c
#define MX6Q_GPC_IMR1				0x08
#define MX6Q_GPC_IMR2				0x0c
#define MX6Q_GPC_IMR3				0x10
#define MX6Q_GPC_IMR4				0x14
#define MX6Q_CCM_CCR				0x0

	.align 3

	.macro	sync_l2_cache

	/* sync L2 cache to drain L2's buffers to DRAM. */
#ifdef CONFIG_CACHE_L2X0
	ldr	r11, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
	teq	r11, #0
	beq	6f
	mov	r6, #0x0
	str	r6, [r11, #L2X0_CACHE_SYNC]
1:
	ldr	r6, [r11, #L2X0_CACHE_SYNC]
	ands	r6, r6, #0x1
	bne	1b
6:
#endif

	.endm

	.macro	resume_mmdc

	/* restore MMDC IO */
	cmp	r5, #0x0
	ldreq	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldrne	r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]

	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFFSET
	add	r7, r7, r0
1:
	ldr	r8, [r7], #0x4
	ldr	r9, [r7], #0x4
	str	r9, [r11, r8]
	subs	r6, r6, #0x1
	bne	1b

	cmp	r5, #0x0
	ldreq	r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
	ldrne	r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]

	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	bne	4f

	/* reset read FIFO, RST_RD_FIFO */
	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
	ldr	r6, [r11, r7]
	orr	r6, r6, #(1 << 31)
	str	r6, [r11, r7]
2:
	ldr	r6, [r11, r7]
	ands	r6, r6, #(1 << 31)
	bne	2b

	/* reset the FIFO a second time */
	ldr	r6, [r11, r7]
	orr	r6, r6, #(1 << 31)
	str	r6, [r11, r7]
3:
	ldr	r6, [r11, r7]
	ands	r6, r6, #(1 << 31)
	bne	3b
4:
	/* take the DDR out of self-refresh */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	bic	r7, r7, #(1 << 21)
	str	r7, [r11, #MX6Q_MMDC_MAPSR]
5:
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	ands	r7, r7, #(1 << 25)
	bne	5b

	/* enable DDR auto power saving */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	bic	r7, r7, #0x1
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

	.endm

ENTRY(imx6_suspend)
	ldr	r1, [r0, #PM_INFO_PBASE_OFFSET]
	ldr	r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
	ldr	r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]

	/*
	 * Compute the physical resume address in iram (ocram)
	 * so that it can be programmed into the SRC register.
	 */
	ldr	r6, =imx6_suspend
	ldr	r7, =resume
	sub	r7, r7, r6
	add	r8, r1, r4
	add	r9, r8, r7
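	/*
	 * In other words, following the suspend ocram layout sketched
	 * at the top of this file:
	 *   r8 = pbase + pm_info_size          (phys start of the ocram code)
	 *   r9 = r8 + (resume - imx6_suspend)  (phys address of the resume label)
	 */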

	/*
	 * Make sure the TLB contains translations for the registers
	 * we are going to touch, as we will access them after the
	 * MMDC IO pads have been floated.
	 */

	ldr	r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
	ldr	r6, [r11, #0x0]
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	ldr	r6, [r11, #0x0]
	ldr	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldr	r6, [r11, #0x0]
	/* use r11 to store the IO address */
	ldr	r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
	/* store physical resume addr and pm_info address. */
	str	r9, [r11, #MX6Q_SRC_GPR1]
	str	r1, [r11, #MX6Q_SRC_GPR2]

	/* need to sync L2 cache before DSM. */
	sync_l2_cache

	ldr	r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
	/*
	 * put DDR explicitly into self-refresh and
	 * disable automatic power savings.
	 */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	orr	r7, r7, #0x1
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

	/* make the DDR explicitly enter self-refresh. */
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	orr	r7, r7, #(1 << 21)
	str	r7, [r11, #MX6Q_MMDC_MAPSR]

poll_dvfs_set:
	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
	ands	r7, r7, #(1 << 25)
	beq	poll_dvfs_set

	ldr	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
	ldr	r6, =0x0
	ldr	r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
	ldr	r8, =PM_INFO_MMDC_IO_VAL_OFFSET
	add	r8, r8, r0
	/* LPDDR2's last 3 IOs need special setting */
	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	subeq	r7, r7, #0x3
set_mmdc_io_lpm:
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	subs	r7, r7, #0x1
	bne	set_mmdc_io_lpm

	cmp	r3, #IMX_DDR_TYPE_LPDDR2
	bne	set_mmdc_io_lpm_done
	ldr	r6, =0x1000
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	ldr	r9, [r8], #0x8
	str	r6, [r11, r9]
	ldr	r6, =0x80000
	ldr	r9, [r8]
	str	r6, [r11, r9]
set_mmdc_io_lpm_done:

	/*
	 * Mask all GPC interrupts before
	 * enabling the RBC counters to
	 * avoid the counter starting too
	 * early if an interrupt is already
	 * pending.
	 */
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	ldr	r6, [r11, #MX6Q_GPC_IMR1]
	ldr	r7, [r11, #MX6Q_GPC_IMR2]
	ldr	r8, [r11, #MX6Q_GPC_IMR3]
	ldr	r9, [r11, #MX6Q_GPC_IMR4]

	ldr	r10, =0xffffffff
	str	r10, [r11, #MX6Q_GPC_IMR1]
	str	r10, [r11, #MX6Q_GPC_IMR2]
	str	r10, [r11, #MX6Q_GPC_IMR3]
	str	r10, [r11, #MX6Q_GPC_IMR4]

	/*
	 * Enable the RBC bypass counter here
	 * to hold off the interrupts. RBC counter
	 * = 32 (1ms); the minimum RBC delay should be
	 * 400us for the analog LDOs to power down.
	 */
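	/*
	 * For reference: assuming the bypass counter runs on the 32 kHz
	 * low-frequency clock, a count of 0x20 corresponds to roughly
	 * 32 / 32768 Hz ~= 976 us, comfortably above the 400 us minimum.
	 */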
	ldr	r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
	ldr	r10, [r11, #MX6Q_CCM_CCR]
	bic	r10, r10, #(0x3f << 21)
	orr	r10, r10, #(0x20 << 21)
	str	r10, [r11, #MX6Q_CCM_CCR]

	/* enable the counter. */
	ldr	r10, [r11, #MX6Q_CCM_CCR]
	orr	r10, r10, #(0x1 << 27)
	str	r10, [r11, #MX6Q_CCM_CCR]

	/* unmask all the GPC interrupts. */
	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
	str	r6, [r11, #MX6Q_GPC_IMR1]
	str	r7, [r11, #MX6Q_GPC_IMR2]
	str	r8, [r11, #MX6Q_GPC_IMR3]
	str	r9, [r11, #MX6Q_GPC_IMR4]

	/*
	 * Now delay for a short while (3usec);
	 * ARM is at 1GHz at this point,
	 * so a short loop should be enough.
	 * This delay is required to ensure that
	 * the RBC counter can start counting in
	 * case an interrupt is already pending
	 * or in case an interrupt arrives just
	 * as ARM is about to assert DSM_request.
	 */
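	/*
	 * Rough sanity check on the loop count below: assuming the
	 * subs/bne pair costs on the order of one to two cycles per
	 * iteration, 2000 iterations at ~1 GHz take a few microseconds,
	 * in line with the 3usec figure above.
	 */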
	ldr	r6, =2000
rbc_loop:
	subs	r6, r6, #0x1
	bne	rbc_loop

	/* Zzz, enter stop mode */
	wfi
	nop
	nop
	nop
	nop

	/*
	 * Reaching this point means a wakeup source is
	 * already pending and the system will resume
	 * automatically; the MMDC IO pads need to be
	 * restored first.
	 */
	mov	r5, #0x0
	resume_mmdc

	/* return to suspend finish */
	ret	lr

resume:
	/* invalidate the L1 I-cache and branch predictor first */
	mov	r6, #0x0
	mcr	p15, 0, r6, c7, c5, 0
	mcr	p15, 0, r6, c7, c5, 6
	/* enable the I-cache and branch prediction (SCTLR.I | SCTLR.Z) */
	mov	r6, #0x1800
	mcr	p15, 0, r6, c1, c0, 0
	isb

	/* get physical resume address from pm_info. */
	ldr	lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
	/* clear core0's entry and parameter */
	ldr	r11, [r0, #PM_INFO_MX6Q_SRC_P_OFFSET]
	mov	r7, #0x0
	str	r7, [r11, #MX6Q_SRC_GPR1]
	str	r7, [r11, #MX6Q_SRC_GPR2]

	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
	mov	r5, #0x1
	resume_mmdc

	ret	lr
ENDPROC(imx6_suspend)

/*
 * The following code must assume it is running from its physical
 * address, so any absolute virtual addresses to the data section
 * have to be turned into relative ones.
 */

ENTRY(v7_cpu_resume)
	bl	v7_invalidate_l1
#ifdef CONFIG_CACHE_L2X0
	bl	l2c310_early_resume
#endif
	b	cpu_resume
ENDPROC(v7_cpu_resume)