ARM: pm: extract common code from MULTI_CPU/!MULTI_CPU paths
arch/arm/kernel/sleep.S
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
 *  r3 = virtual return function
 * Note: sp is decremented to allocate space for CPU state on stack
 * r0-r3,r9,r10,lr corrupted
 */
ENTRY(cpu_suspend)
	stmfd	sp!, {r3}		@ save virtual return function
	mov	r9, lr			@ save return address
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
#else
	ldr	r0, =cpu_suspend_size
	ldr	ip, =cpu_do_resume
#endif
	mov	r2, sp			@ current virtual SP
	sub	sp, sp, r0		@ allocate CPU state on stack
	mov	r0, sp			@ save pointer
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r2, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r3, =sleep_save_sp
	add	r2, sp, r1		@ convert SP to phys
#ifdef CONFIG_SMP
	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
	ALT_UP(mov lr, #0)
	and	lr, lr, #15		@ CPU number from MPIDR
	str	r2, [r3, lr, lsl #2]	@ save phys SP
#else
	str	r2, [r3]		@ save phys SP
#endif
#ifdef MULTI_CPU
	mov	lr, pc
	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
	bl	cpu_do_suspend
#endif

	@ flush data cache
#ifdef MULTI_CACHE
	ldr	r10, =cpu_cache
	mov	lr, r9
	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
	mov	lr, r9
	b	__cpuc_flush_kern_all
#endif
ENDPROC(cpu_suspend)
	.ltorg
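
/*
 * A minimal caller sketch, not part of this file: the names
 * platform_suspend, platform_enter_sleep and platform_resume are
 * hypothetical stand-ins for per-machine code, and the v:p offset is
 * shown illustratively as PHYS_OFFSET - PAGE_OFFSET.  cpu_suspend
 * returns to its caller once the state is saved and the cache is
 * flushed; the caller then enters the low power state.  After wakeup,
 * cpu_resume eventually lands at the virtual return function passed
 * in r3, with sp back at the caller's value.
 *
 *	ENTRY(platform_suspend)
 *		stmfd	sp!, {r4 - r11, lr}	@ callee-saved regs
 *		ldr	r1, =PHYS_OFFSET - PAGE_OFFSET	@ v:p offset
 *		ldr	r3, =platform_resume	@ virtual return function
 *		bl	cpu_suspend		@ save state, flush cache
 *		bl	platform_enter_sleep	@ power down here
 *	ENDPROC(platform_suspend)
 *
 *	platform_resume:			@ entered after wakeup
 *		ldmfd	sp!, {r4 - r11, pc}	@ restore regs, return
 */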

/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r2 = phys page table base
 * r3 = L1 section flags
 */
ENTRY(cpu_resume_mmu)
	adr	r4, cpu_resume_turn_mmu_on
	mov	r4, r4, lsr #20		@ L1 section index of mmu-on code
	orr	r3, r3, r4, lsl #20	@ build 1:1 section entry
	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
	sub	r2, r2, r1		@ page table base back to virtual
	ldr	r3, =cpu_resume_after_mmu
	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
	b	cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
	.ltorg
	.align	5
cpu_resume_turn_mmu_on:
	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
	mov	r1, r1			@ two nops: let the MMU enable
	mov	r1, r1			@ take effect before the jump
	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	ldmfd	sp!, {pc}		@ pop virtual return function
ENDPROC(cpu_resume_after_mmu)
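
/*
 * Worked example of the 1:1 mapping arithmetic above, assuming 1MiB L1
 * sections with 4-byte descriptors: if cpu_resume_turn_mmu_on sits at
 * physical address 0x80123456, then
 *
 *	r4 = 0x80123456 >> 20 = 0x801		@ L1 section index
 *	descriptor offset = 0x801 << 2 = 0x2004	@ index * 4 bytes
 *	new entry = (0x801 << 20) | r3 = 0x80100000 | section flags
 *
 * so virtual 0x80100000 maps to physical 0x80100000, and the fetches
 * issued right after the MMU is enabled hit the same instruction
 * stream the CPU was already executing.
 */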

/*
 * Note: Yes, part of the following code is located in the .data section.
 * This is to allow sleep_save_sp to be accessed with a relative load
 * while we can't rely on any MMU translation.  We could have put
 * sleep_save_sp in the .text section as well, but some setups might
 * insist on it being truly read-only.
 */
	.data
	.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
	adr	r0, sleep_save_sp
	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
	ALT_UP(mov r1, #0)
	and	r1, r1, #15		@ CPU number from MPIDR
	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
#else
	ldr	r0, sleep_save_sp	@ stack phys addr
#endif
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load v:p, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)
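
/*
 * Sketch of the save record consumed by the ldmia above, in the layout
 * cpu_suspend writes it (lowest address first):
 *
 *	sleep_save_sp[cpu] -->	v:p offset		-> r1
 *				virtual SP		-> sp
 *				phys cpu_do_resume	-> pc
 *				CPU-specific state	(cpu_do_suspend)
 *
 * The post-increment leaves r0 pointing at the CPU-specific state, so
 * the processor's resume function can restore from r0 before branching
 * to cpu_resume_mmu.
 */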

sleep_save_sp:
	.rept	CONFIG_NR_CPUS
	.long	0			@ preserve stack phys ptr here
	.endr