/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <plat/sram.h>
#include <mach/io.h>

#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#include "sdrc.h"
#include "control.h"

/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this to a proper place once one is available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
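
/*
 * Note: the _P-suffixed addresses above are physical addresses, meant to
 * be used while the MMU is off, whereas the _V-suffixed ones resolve to
 * the static virtual mappings provided by the *_REGADDR macros.
 */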


/*
 * API functions
 */

/*
 * The "get_*restore_pointer" functions are used to provide a
 * physical restore address where the ROM code jumps while waking
 * up from MPU OFF/OSWR state.
 * The restore pointer is stored in the scratchpad.
 */
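
/*
 * Each get_* stub below simply returns (in r0) the run-time address of
 * the corresponding restore label further down (restore, restore_3630 or
 * restore_es3); the PM code is expected to convert that to a physical
 * address before writing it to the scratchpad. The matching *_sz words
 * hold the size in bytes of the preceding code so that it can be
 * relocated (e.g. copied to SRAM) if needed.
 */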

	.text
/* Function call to get the restore pointer for resume from OFF */
ENTRY(get_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore
	ldmfd	sp!, {pc}	@ restore regs and return
ENTRY(get_restore_pointer_sz)
	.word	. - get_restore_pointer

	.text
/* Function call to get the restore pointer for 3630 resume from OFF */
ENTRY(get_omap3630_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_3630
	ldmfd	sp!, {pc}	@ restore regs and return
ENTRY(get_omap3630_restore_pointer_sz)
	.word	. - get_omap3630_restore_pointer

	.text
/* Function call to get the restore pointer for ES3 to resume from OFF */
ENTRY(get_es3_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_es3
	ldmfd	sp!, {pc}	@ restore regs and return
ENTRY(get_es3_restore_pointer_sz)
	.word	. - get_es3_restore_pointer

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully a future version will not need this.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	str	r1, l2dis_3630
	ldmfd	sp!, {pc}	@ restore regs and return

	.text
/* Function to call rom code to save secure ram context */
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r1-r12, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3, #0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
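	/*
	 * The .word below is the hand-assembled encoding of the Secure
	 * Monitor call ("smc #1" / "smi #1"); it is emitted as raw data,
	 * presumably so the file also builds with assemblers that do not
	 * accept the mnemonic.
	 */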
	.word	0xE1600071		@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r1-r12, pc}
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 *
 * Notes:
 * - this code gets copied to internal SRAM at boot and after wake-up
 *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
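
/*
 * Illustrative call site (an assumption based on the usual pm34xx.c flow,
 * not part of this file): the function is first copied to SRAM and then
 * invoked through the returned pointer, e.g.
 *
 *	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
 *					 omap34xx_cpu_suspend_sz);
 *	...
 *	_omap_sram_idle(omap3_arm_context, save_state);
 */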
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r0-r12, lr}	@ save registers on stack

	/*
	 * r0 contains restore pointer in sdram
	 * r1 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost
	 *   3 - Both L1 and L2 lost
	 */

	/* Directly jump to WFI if the context save is not required */
	cmp	r1, #0x0
	beq	omap3_do_wfi

	/* Otherwise fall through to the save context code */
save_context_wfi:
	mov	r8, r0			@ Store SDRAM address in r8
	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
	mov	r4, #0x1		@ Number of parameters for restore call
	stmia	r8!, {r4-r5}		@ Push parameters for restore call
	mrc	p15, 1, r5, c9, c0, 2	@ Read L2 AUX ctrl register
	stmia	r8!, {r4-r5}		@ Push parameters for restore call
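
	/*
	 * Note on the save-area layout written through r8 (and read back
	 * by the 'restore' path below): two {count, value} pairs for the
	 * ARM and L2 auxiliary control registers, then sp/spsr/lr,
	 * followed by the remaining CP15 context, the CPSR and finally
	 * the control register.
	 */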

	/* Check what the target sleep state is from r1 */
	cmp	r1, #0x2		@ Only L2 lost, no need to save context
	beq	clean_caches

l1_logic_lost:
	/* Store sp and spsr to SDRAM */
	mov	r4, sp
	mrs	r5, spsr
	mov	r6, lr
	stmia	r8!, {r4-r6}
	/* Save all ARM registers */
	/* Coprocessor access control register */
	mrc	p15, 0, r6, c1, c0, 2
	stmia	r8!, {r6}
	/* TTBR0, TTBR1 and Translation table base control */
	mrc	p15, 0, r4, c2, c0, 0
	mrc	p15, 0, r5, c2, c0, 1
	mrc	p15, 0, r6, c2, c0, 2
	stmia	r8!, {r4-r6}
	/*
	 * Domain access control register, data fault status register,
	 * and instruction fault status register
	 */
	mrc	p15, 0, r4, c3, c0, 0
	mrc	p15, 0, r5, c5, c0, 0
	mrc	p15, 0, r6, c5, c0, 1
	stmia	r8!, {r4-r6}
	/*
	 * Data aux fault status register, instruction aux fault status,
	 * data fault address register and instruction fault address register
	 */
	mrc	p15, 0, r4, c5, c1, 0
	mrc	p15, 0, r5, c5, c1, 1
	mrc	p15, 0, r6, c6, c0, 0
	mrc	p15, 0, r7, c6, c0, 2
	stmia	r8!, {r4-r7}
	/*
	 * user r/w thread and process ID, user r/o thread and process ID,
	 * priv only thread and process ID, cache size selection
	 */
	mrc	p15, 0, r4, c13, c0, 2
	mrc	p15, 0, r5, c13, c0, 3
	mrc	p15, 0, r6, c13, c0, 4
	mrc	p15, 2, r7, c0, c0, 0
	stmia	r8!, {r4-r7}
	/* Data TLB lockdown, instruction TLB lockdown registers */
	mrc	p15, 0, r5, c10, c0, 0
	mrc	p15, 0, r6, c10, c0, 1
	stmia	r8!, {r5-r6}
	/* Secure or non secure vector base address, FCSE PID, Context PID */
	mrc	p15, 0, r4, c12, c0, 0
	mrc	p15, 0, r5, c13, c0, 0
	mrc	p15, 0, r6, c13, c0, 1
	stmia	r8!, {r4-r6}
	/* Primary remap, normal remap registers */
	mrc	p15, 0, r4, c10, c2, 0
	mrc	p15, 0, r5, c10, c2, 1
	stmia	r8!, {r4-r5}

	/* Store current cpsr */
	mrs	r2, cpsr
	stmia	r8!, {r2}

	mrc	p15, 0, r4, c1, c0, 0
	/* save control register */
	stmia	r8!, {r4}

clean_caches:
	/*
	 * Clean Data or unified cache to PoU
	 * How to invalidate only L1 cache???? - #FIX_ME#
	 * mcr	p15, 0, r11, c7, c11, 1
	 */
	cmp	r1, #0x1		@ Check whether L2 inval is required
	beq	omap3_do_wfi

clean_l2:
	/*
	 * Jump out to the kernel flush routine
	 * - reusing that code is better
	 * - it executes in a cached space so is faster than refetch per-block
	 * - should be faster and will change with kernel
	 * - 'might' have to copy address, load and jump to it
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1
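
	/*
	 * Note: "mov lr, pc; bx r1" is an explicit indirect call; the flush
	 * routine's address is taken from the kernel_flush literal
	 * (v7_flush_dcache_all) rather than via a plain bl, presumably
	 * because this code also runs from its relocated SRAM copy, where a
	 * relative branch to the kernel text would not reach.
	 */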

omap3_do_wfi:
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	mov	r1, #0
	mcr	p15, 0, r1, c7, c10, 4
	mcr	p15, 0, r1, c7, c10, 5

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	bl	wait_sdrc_ok

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return


/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 */
restore_es3:
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2	@ convert size in bytes to number of words
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	restore

restore_3630:
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]

	/* Fall through to common code for the remaining logic */

restore:
	/*
	 * Check what was the reason for mpu reset and store the reason in r9:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost - In this case, we won't be here
	 *   3 - Both L1 and L2 lost
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	moveq	r9, #0x3	@ MPU OFF => L1 and L2 lost
	movne	r9, #0x1	@ Only L1 and logic lost => avoid L2 invalidation
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700	@ extract the device type bits
	cmp	r1, #0x300	@ GP device?
	beq	l2_inv_gp
	mov	r0, #40		@ set service ID for PPA
	mov	r12, r0		@ copy secure Service ID in r12
	mov	r1, #0		@ set task id for ROM code in r1
	mov	r2, #4		@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
	.word	0xE1600071		@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42		@ set service ID for PPA
	mov	r12, r0		@ copy secure Service ID in r12
	mov	r1, #0		@ set task id for ROM code in r1
	mov	r2, #4		@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]	@ r3 points to parameters
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
	.word	0xE1600071		@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
	@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0		@ copy service ID in r12
	mov	r1, #0		@ set task ID for ROM code in r1
	mov	r2, #4		@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8	@ r3 points to parameters
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
	.word	0xE1600071	@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	.word	0xE1600070		@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #4]
	mov	r12, #0x3
	.word	0xE1600070		@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #12]
	mov	r12, #0x2
	.word	0xE1600070		@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:
	mov	r1, #0
	/*
	 * Invalidate all instruction caches to PoU
	 * and flush branch target cache
	 */
	mcr	p15, 0, r1, c7, c5, 0

	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #16		@ skip the two {count, value} pairs
	ldmia	r3!, {r4-r6}
	mov	sp, r4
	msr	spsr_cxsf, r5
	mov	lr, r6

	ldmia	r3!, {r4-r9}
	/* Coprocessor access Control Register */
	mcr	p15, 0, r4, c1, c0, 2

	/* TTBR0 */
	mcr	p15, 0, r5, c2, c0, 0
	/* TTBR1 */
	mcr	p15, 0, r6, c2, c0, 1
	/* Translation table base control register */
	mcr	p15, 0, r7, c2, c0, 2
	/* Domain access Control Register */
	mcr	p15, 0, r8, c3, c0, 0
	/* Data fault status Register */
	mcr	p15, 0, r9, c5, c0, 0

	ldmia	r3!, {r4-r8}
	/* Instruction fault status Register */
	mcr	p15, 0, r4, c5, c0, 1
	/* Data Auxiliary Fault Status Register */
	mcr	p15, 0, r5, c5, c1, 0
	/* Instruction Auxiliary Fault Status Register */
	mcr	p15, 0, r6, c5, c1, 1
	/* Data Fault Address Register */
	mcr	p15, 0, r7, c6, c0, 0
	/* Instruction Fault Address Register */
	mcr	p15, 0, r8, c6, c0, 2
	ldmia	r3!, {r4-r7}

	/* User r/w thread and process ID */
	mcr	p15, 0, r4, c13, c0, 2
	/* User ro thread and process ID */
	mcr	p15, 0, r5, c13, c0, 3
	/* Privileged only thread and process ID */
	mcr	p15, 0, r6, c13, c0, 4
	/* Cache size selection */
	mcr	p15, 2, r7, c0, c0, 0
	ldmia	r3!, {r4-r8}
	/* Data TLB lockdown registers */
	mcr	p15, 0, r4, c10, c0, 0
	/* Instruction TLB lockdown registers */
	mcr	p15, 0, r5, c10, c0, 1
	/* Secure or Nonsecure Vector Base Address */
	mcr	p15, 0, r6, c12, c0, 0
	/* FCSE PID */
	mcr	p15, 0, r7, c13, c0, 0
	/* Context PID */
	mcr	p15, 0, r8, c13, c0, 1

	ldmia	r3!, {r4-r5}
	/* Primary memory remap register */
	mcr	p15, 0, r4, c10, c2, 0
	/* Normal memory remap register */
	mcr	p15, 0, r5, c10, c2, 1

	/* Restore cpsr */
	ldmia	r3!, {r4}	@ load CPSR from SDRAM
	msr	cpsr, r4	@ restore cpsr

	/* Enabling MMU here */
	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
	and	r7, #0x7
	cmp	r7, #0x0
	beq	usettbr0
ttbr_error:
	/*
	 * More work needs to be done to support N[0:2] values other than 0.
	 * So loop here so that the error can be detected.
	 */
	b	ttbr_error
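
/*
 * What follows builds a temporary 1 MB section entry that identity-maps
 * the region containing the current (physical) PC, so that execution can
 * continue once the MMU is turned back on. The original first-level
 * descriptor and its address are stashed in the scratchpad (offsets 0xC0
 * and 0xC4) so that the restore code can put them back afterwards.
 */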
usettbr0:
	mrc	p15, 0, r2, c2, c0, 0
	ldr	r5, ttbrbit_mask
	and	r2, r5
	mov	r4, pc
	ldr	r5, table_index_mask
	and	r4, r5			@ r4 = 31 to 20 bits of pc
	/* Extract the value to be written to table entry */
	ldr	r1, table_entry
	/* r1 has the value to be written to table entry */
	add	r1, r1, r4
	/* Getting the address of table entry to modify */
	lsr	r4, #18
	/* r2 has the location which needs to be modified */
	add	r2, r4
	/* Storing previous entry of location being modified */
	ldr	r5, scratchpad_base
	ldr	r4, [r2]
	str	r4, [r5, #0xC0]
	/* Modify the table entry */
	str	r1, [r2]
	/*
	 * Storing address of entry being modified
	 * - will be restored after enabling MMU
	 */
	ldr	r5, scratchpad_base
	str	r2, [r5, #0xC4]

	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
	/*
	 * Restore control register. This enables the MMU.
	 * The caches and prediction are not enabled here, they
	 * will be enabled after restoring the MMU table entry.
	 */
	ldmia	r3!, {r4}
	/* Store previous value of control register in scratchpad */
	str	r4, [r5, #0xC8]
	ldr	r2, cache_pred_disable_mask
	and	r4, r2
	mcr	p15, 0, r4, c1, c0, 0

/*
 * ==============================
 * == Exit point from OFF mode ==
 * ==============================
 */
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return


/*
 * Internal functions
 */

/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
	.text
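/*
 * Summary of the workaround: re-enable SDRC part access in SDRC_SYSCONFIG
 * if it was blocked, rewrite the mode and extended-mode registers of both
 * chip-selects with their current values, and issue a manual autorefresh
 * command to each, so that the SDRAM is back in a known state after an
 * OFF-mode wakeup.
 */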
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix

/*
 * This function implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bxne	lr			@ Return if locked
	/* wait till dll locks */
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, wait_dll_lock_counter
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	bx	lr			@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1
	str	r4, kick_counter
	b	wait_dll_lock_timed
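
/*
 * Note: kick_dll forces a relock by toggling bit 3 (DLL enable) of
 * SDRC_DLLA_CTRL, with a dsb after each write so the disable and the
 * re-enable both reach the SDRC before the lock status is polled again.
 * kick_counter and wait_dll_lock_counter below only record how often this
 * happened, for debugging/statistics.
 */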

cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
sdrc_power:
	.word	SDRC_POWER_V
ttbrbit_mask:
	.word	0xFFFFC000
table_index_mask:
	.word	0xFFF00000
table_entry:
	.word	0x00000C02
cache_pred_disable_mask:
	.word	0xFFFFE7FB
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
kernel_flush:
	.word	v7_flush_dcache_all
l2dis_3630:
	.word	0
/*
 * When exporting to userspace while the counters are in SRAM,
 * these 2 words need to be at the end to facilitate retrieval!
 */
kick_counter:
	.word	0
wait_dll_lock_counter:
	.word	0

ENTRY(omap34xx_cpu_suspend_sz)
	.word	. - omap34xx_cpu_suspend