Commit | Line | Data |
---|---|---|
f6b0fa02 | 1 | #include <linux/linkage.h> |
941aefac | 2 | #include <linux/threads.h> |
f6b0fa02 RK |
3 | #include <asm/asm-offsets.h> |
4 | #include <asm/assembler.h> | |
5 | #include <asm/glue-cache.h> | |
6 | #include <asm/glue-proc.h> | |
f6b0fa02 RK |
7 | .text |
8 | ||
/*
 * Implementation of MPIDR hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @mpidr: register containing MPIDR value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C-code:
 *
 *u32 dst;
 *
 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
 *	u32 aff0, aff1, aff2;
 *	u32 mpidr_masked = mpidr & mask;
 *	aff0 = mpidr_masked & 0xff;
 *	aff1 = mpidr_masked & 0xff00;
 *	aff2 = mpidr_masked & 0xff0000;
 *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
 *}
 * Input registers: rs0, rs1, rs2, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
 * NOTE: \mpidr and \mask are used as scratch below and are clobbered.
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
	and	\mpidr, \mpidr, \mask			@ mask out MPIDR bits
	and	\dst, \mpidr, #0xff			@ mask=aff0
 ARM(	mov	\dst, \dst, lsr \rs0		)	@ dst=aff0>>rs0
 THUMB(	lsr	\dst, \dst, \rs0		)
	and	\mask, \mpidr, #0xff00			@ mask = aff1
 ARM(	orr	\dst, \dst, \mask, lsr \rs1	)	@ dst|=(aff1>>rs1)
 THUMB(	lsr	\mask, \mask, \rs1		)
 THUMB(	orr	\dst, \dst, \mask		)
	and	\mask, \mpidr, #0xff0000		@ mask = aff2
 ARM(	orr	\dst, \dst, \mask, lsr \rs2	)	@ dst|=(aff2>>rs2)
 THUMB(	lsr	\mask, \mask, \rs2		)
 THUMB(	orr	\dst, \dst, \mask		)
	.endm
51 | ||
/*
 * Save CPU state for a suspend.  This saves the CPU general purpose
 * registers, and allocates space on the kernel stack to save the CPU
 * specific registers and some other data for resume.
 *  r0 = suspend function arg0
 *  r1 = suspend function
 *  r2 = MPIDR value the resuming CPU will use
 */
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
#else
	ldr	r4, =cpu_suspend_size
#endif
	mov	r5, sp			@ current virtual SP
	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
	sub	sp, sp, r4		@ allocate CPU state on stack
	ldr	r3, =sleep_save_sp
	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
	ALT_SMP(ldr r0, =mpidr_hash)
	ALT_UP_B(1f)
	/* This ldmia relies on the memory layout of the mpidr_hash struct */
	ldmia	r0, {r1, r6-r8}	@ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
	compute_mpidr_hash	r0, r6, r7, r8, r2, r1
	add	r3, r3, r0, lsl #2	@ index sleep_save_sp slot by MPIDR hash
1:	mov	r2, r5			@ virtual SP
	mov	r1, r4			@ size of save block
	add	r0, sp, #8		@ pointer to save block
	bl	__cpu_suspend_save
	badr	lr, cpu_suspend_abort	@ if the suspend fn returns, land there
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
	.ltorg				@ emit literal pool for the ldr =sym above
88 | ||
/*
 * Reached (via lr set up in __cpu_suspend) when the suspend fn returns
 * instead of powering down: discard the saved state from the stack,
 * restore the caller's SP and registers, and return non-zero.
 */
cpu_suspend_abort:
	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
	teq	r0, #0
	moveq	r0, #1			@ force non-zero value
	mov	sp, r2			@ restore virtual SP saved by __cpu_suspend
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)
96 | ||
f6b0fa02 RK |
/*
 * Turn the MMU (and I-cache etc.) back on and branch to the
 * virtual-address continuation.  Runs from .idmap.text so the
 * enabling instruction itself stays valid across the switch.
 * r0 = control register value
 */
	.align	5
	.pushsection	.idmap.text,"ax"
ENTRY(cpu_resume_mmu)
	ldr	r3, =cpu_resume_after_mmu	@ virtual target, fetched first
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
	instr_sync
	mov	r0, r0			@ NOTE(review): two-insn delay after MMU
	mov	r0, r0			@ enable — presumably pipeline settle; confirm
	ret	r3			@ jump to virtual address
ENDPROC(cpu_resume_mmu)
	.popsection
/*
 * Virtual-address continuation of resume: re-init banked mode state
 * and return 0 to __cpu_suspend's caller via the registers saved on
 * the (now restored) kernel stack.
 */
cpu_resume_after_mmu:
	bl	cpu_init		@ restore the und/abt/irq banked regs
	mov	r0, #0			@ return zero on success
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
118 | ||
	.text
	.align

#ifdef CONFIG_MMU
	.arm
ENTRY(cpu_resume_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif

/*
 * Resume entry point.  Addresses here are reached PC-relatively
 * (see _sleep_save_sp / mpidr_hash_ptr offsets below) — presumably
 * running at physical addresses with the MMU still off; confirm
 * against the platform resume path.
 */
ENTRY(cpu_resume)
ARM_BE8(setend be)			@ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r1
	mov	r1, #0			@ hash index 0 on UP (ALT_UP skips hashing)
	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)	@ r0 = this CPU's MPIDR
	ALT_UP_B(1f)
	adr	r2, mpidr_hash_ptr
	ldr	r3, [r2]
	add	r2, r2, r3		@ r2 = struct mpidr_hash phys address
	/*
	 * This ldmia relies on the memory layout of the mpidr_hash
	 * struct mpidr_hash.
	 */
	ldmia	r2, { r3-r6 }	@ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
1:
	adr	r0, _sleep_save_sp
	ldr	r2, [r0]
	add	r0, r0, r2		@ r0 = &sleep_save_sp (PC-relative)
	ldr	r0, [r0, #SLEEP_SAVE_SP_PHYS]
	ldr	r0, [r0, r1, lsl #2]	@ r0 = this CPU's save block (by hash)

	@ load phys pgd, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
 THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2			)
 THUMB(	bx	r3			)
ENDPROC(cpu_resume)

#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
f6b0fa02 | 166 | |
	.align	2
/*
 * Position-independent offsets: each .long holds "symbol - .", so
 * cpu_resume can locate the symbols with adr + add while running
 * without a fixed mapping.
 */
_sleep_save_sp:
	.long	sleep_save_sp - .
mpidr_hash_ptr:
	.long	mpidr_hash - .		@ mpidr_hash struct offset

	.data
	.type	sleep_save_sp, #object
ENTRY(sleep_save_sp)
	.space	SLEEP_SAVE_SP_SZ	@ struct sleep_save_sp