/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
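/* The resulting frame, offsets from r1:
 *    0  back chain (HOST_R1)       4  callee LR save slot
 *    8  kvm_run pointer           12  host r2 ('current')
 *   16  host CR                   20+ host r14..r31
 * rounded up to a 16-byte multiple. */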

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK  ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))
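/* kvmppc_resume_host computes (1 << exit_nr) once and tests it against
 * these masks, so each exit type pays only for the SPR reads it needs. */

/*
 * Common exit stub: stash r4 in the per-class scratch SPRG, locate the
 * vcpu through SPRN_SPRG_THREAD, save the GPRs we are about to clobber,
 * record the exit number and guest PC, then branch to kvmppc_resume_host.
 */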
.macro __KVM_HANDLER ivor_nr scratch srr0
        /* Get pointer to vcpu and record exit number. */
        mtspr \scratch, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        stw r3, VCPU_GPR(R3)(r4)
        stw r5, VCPU_GPR(R5)(r4)
        stw r6, VCPU_GPR(R6)(r4)
        mfspr r3, \scratch
        mfctr r5
        stw r3, VCPU_GPR(R4)(r4)
        stw r5, VCPU_CTR(r4)
        mfspr r3, \srr0
        lis r6, kvmppc_resume_host@h
        stw r3, VCPU_PC(r4)
        li r5, \ivor_nr
        ori r6, r6, kvmppc_resume_host@l
        mtctr r6
        bctr
.endm

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        __KVM_HANDLER \ivor_nr \scratch \srr0
.endm

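/*
 * Debug interrupts get special treatment: CSRR1[PR] tells us whether the
 * event hit guest context (the guest runs with MSR[PR] set) or the host's
 * own enter/exit path. For the latter we clear MSR[DE] in CSRR1, wipe
 * DBSR, and return with rfci; otherwise we fall into the common handler.
 */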
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        mtspr \scratch, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        stw r3, VCPU_CRIT_SAVE(r4)
        mfcr r3
        mfspr r4, SPRN_CSRR1
        andi. r4, r4, MSR_PR
        bne 1f
        /* debug interrupt happened in enter/exit path */
        mfspr r4, SPRN_CSRR1
        rlwinm r4, r4, 0, ~MSR_DE
        mtspr SPRN_CSRR1, r4
        lis r4, 0xffff
        ori r4, r4, 0xffff
        mtspr SPRN_DBSR, r4
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        mtcr r3
        lwz r3, VCPU_CRIT_SAVE(r4)
        mfspr r4, \scratch
        rfci
1:      /* debug interrupt happened in guest */
        mtcr r3
        mfspr r4, SPRN_SPRG_THREAD
        lwz r4, THREAD_KVM_VCPU(r4)
        lwz r3, VCPU_CRIT_SAVE(r4)
        mfspr r4, \scratch
        __KVM_HANDLER \ivor_nr \scratch \srr0
.endm

.macro KVM_HANDLER_ADDR ivor_nr
        .long kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
        .long kvmppc_handlers_end
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        mfcr r3
        stw r3, VCPU_CR(r4)
        stw r7, VCPU_GPR(R7)(r4)
        stw r8, VCPU_GPR(R8)(r4)
        stw r9, VCPU_GPR(R9)(r4)

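        /* r6 = 1 << exit_nr, for testing the NEED_* masks below. */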
        li r6, 1
        slw r6, r6, r5

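        /* A rollover-safe timebase read: if TBU changed while we read
         * TBL, the low half wrapped between reads and we retry. */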
#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
1:
        mfspr r7, SPRN_TBRU
        mfspr r8, SPRN_TBRL
        mfspr r9, SPRN_TBRU
        cmpw r9, r7
        bne 1b
        stw r8, VCPU_TIMING_EXIT_TBL(r4)
        stw r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        /* Save the faulting instruction and all GPRs for emulation. */
        andi. r7, r6, NEED_INST_MASK
        beq ..skip_inst_copy
        mfspr r9, SPRN_SRR0
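        /* Briefly set MSR[DS] so this load translates in the guest's
         * (AS=1) data address space, where the faulting instruction is
         * mapped. */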
        mfmsr r8
        ori r7, r8, MSR_DS
        mtmsr r7
        isync
        lwz r9, 0(r9)
        mtmsr r8
        isync
        stw r9, VCPU_LAST_INST(r4)

        stw r15, VCPU_GPR(R15)(r4)
        stw r16, VCPU_GPR(R16)(r4)
        stw r17, VCPU_GPR(R17)(r4)
        stw r18, VCPU_GPR(R18)(r4)
        stw r19, VCPU_GPR(R19)(r4)
        stw r20, VCPU_GPR(R20)(r4)
        stw r21, VCPU_GPR(R21)(r4)
        stw r22, VCPU_GPR(R22)(r4)
        stw r23, VCPU_GPR(R23)(r4)
        stw r24, VCPU_GPR(R24)(r4)
        stw r25, VCPU_GPR(R25)(r4)
        stw r26, VCPU_GPR(R26)(r4)
        stw r27, VCPU_GPR(R27)(r4)
        stw r28, VCPU_GPR(R28)(r4)
        stw r29, VCPU_GPR(R29)(r4)
        stw r30, VCPU_GPR(R30)(r4)
        stw r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi. r7, r6, NEED_DEAR_MASK
        beq ..skip_dear
        mfspr r9, SPRN_DEAR
        stw r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi. r7, r6, NEED_ESR_MASK
        beq ..skip_esr
        mfspr r9, SPRN_ESR
        stw r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw r0, VCPU_GPR(R0)(r4)
        stw r1, VCPU_GPR(R1)(r4)
        stw r2, VCPU_GPR(R2)(r4)
        stw r10, VCPU_GPR(R10)(r4)
        stw r11, VCPU_GPR(R11)(r4)
        stw r12, VCPU_GPR(R12)(r4)
        stw r13, VCPU_GPR(R13)(r4)
        stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
        mflr r3
        stw r3, VCPU_LR(r4)
        mfxer r3
        stw r3, VCPU_XER(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz r1, VCPU_HOST_STACK(r4)
        lwz r3, VCPU_HOST_PID(r4)
        mtspr SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        /* We cheat: Linux never uses PID1, so the host value is always 0. */
        lis r3, 0
        mtspr SPRN_PID1, r3
#endif

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis r3, 0xc000
        mtspr SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr r3
        lwz r3, HOST_RUN(r1)
        lwz r2, HOST_R2(r1)
        mr r14, r4 /* Save vcpu pointer. */

        bctrl /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr r4, r14
        lwz r14, VCPU_GPR(R14)(r4)

        /* Sometimes instruction emulation must restore complete GPR state. */
        andi. r5, r3, RESUME_FLAG_NV
        beq ..skip_nv_load
        lwz r15, VCPU_GPR(R15)(r4)
        lwz r16, VCPU_GPR(R16)(r4)
        lwz r17, VCPU_GPR(R17)(r4)
        lwz r18, VCPU_GPR(R18)(r4)
        lwz r19, VCPU_GPR(R19)(r4)
        lwz r20, VCPU_GPR(R20)(r4)
        lwz r21, VCPU_GPR(R21)(r4)
        lwz r22, VCPU_GPR(R22)(r4)
        lwz r23, VCPU_GPR(R23)(r4)
        lwz r24, VCPU_GPR(R24)(r4)
        lwz r25, VCPU_GPR(R25)(r4)
        lwz r26, VCPU_GPR(R26)(r4)
        lwz r27, VCPU_GPR(R27)(r4)
        lwz r28, VCPU_GPR(R28)(r4)
        lwz r29, VCPU_GPR(R29)(r4)
        lwz r30, VCPU_GPR(R30)(r4)
        lwz r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

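        /* kvmppc_handle_exit() packs flags (RESUME_FLAG_NV, RESUME_FLAG_HOST)
         * into the low bits of its return code; the -ERR value itself is
         * kept shifted left by two, hence the srawi below. */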
        /* Should we return to the guest? */
        andi. r5, r3, RESUME_FLAG_HOST
        beq lightweight_exit

        srawi r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

#ifdef CONFIG_SPE
        /* save guest SPEFSCR and load host SPEFSCR */
        mfspr r9, SPRN_SPEFSCR
        stw r9, VCPU_SPEFSCR(r4)
        lwz r9, VCPU_HOST_SPEFSCR(r4)
        mtspr SPRN_SPEFSCR, r9
#endif

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw r15, VCPU_GPR(R15)(r4)
        stw r16, VCPU_GPR(R16)(r4)
        stw r17, VCPU_GPR(R17)(r4)
        stw r18, VCPU_GPR(R18)(r4)
        stw r19, VCPU_GPR(R19)(r4)
        stw r20, VCPU_GPR(R20)(r4)
        stw r21, VCPU_GPR(R21)(r4)
        stw r22, VCPU_GPR(R22)(r4)
        stw r23, VCPU_GPR(R23)(r4)
        stw r24, VCPU_GPR(R24)(r4)
        stw r25, VCPU_GPR(R25)(r4)
        stw r26, VCPU_GPR(R26)(r4)
        stw r27, VCPU_GPR(R27)(r4)
        stw r28, VCPU_GPR(R28)(r4)
        stw r29, VCPU_GPR(R29)(r4)
        stw r30, VCPU_GPR(R30)(r4)
        stw r31, VCPU_GPR(R31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz r14, HOST_NV_GPR(R14)(r1)
        lwz r15, HOST_NV_GPR(R15)(r1)
        lwz r16, HOST_NV_GPR(R16)(r1)
        lwz r17, HOST_NV_GPR(R17)(r1)
        lwz r18, HOST_NV_GPR(R18)(r1)
        lwz r19, HOST_NV_GPR(R19)(r1)
        lwz r20, HOST_NV_GPR(R20)(r1)
        lwz r21, HOST_NV_GPR(R21)(r1)
        lwz r22, HOST_NV_GPR(R22)(r1)
        lwz r23, HOST_NV_GPR(R23)(r1)
        lwz r24, HOST_NV_GPR(R24)(r1)
        lwz r25, HOST_NV_GPR(R25)(r1)
        lwz r26, HOST_NV_GPR(R26)(r1)
        lwz r27, HOST_NV_GPR(R27)(r1)
        lwz r28, HOST_NV_GPR(R28)(r1)
        lwz r29, HOST_NV_GPR(R29)(r1)
        lwz r30, HOST_NV_GPR(R30)(r1)
        lwz r31, HOST_NV_GPR(R31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz r4, HOST_STACK_LR(r1)
        lwz r5, HOST_CR(r1)
        addi r1, r1, HOST_STACK_SIZE
        mtlr r4
        mtcr r5
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu r1, -HOST_STACK_SIZE(r1)
        stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw r3, HOST_RUN(r1)
        mflr r3
        stw r3, HOST_STACK_LR(r1)
        mfcr r5
        stw r5, HOST_CR(r1)

        /* Save host non-volatile register state to stack. */
        stw r14, HOST_NV_GPR(R14)(r1)
        stw r15, HOST_NV_GPR(R15)(r1)
        stw r16, HOST_NV_GPR(R16)(r1)
        stw r17, HOST_NV_GPR(R17)(r1)
        stw r18, HOST_NV_GPR(R18)(r1)
        stw r19, HOST_NV_GPR(R19)(r1)
        stw r20, HOST_NV_GPR(R20)(r1)
        stw r21, HOST_NV_GPR(R21)(r1)
        stw r22, HOST_NV_GPR(R22)(r1)
        stw r23, HOST_NV_GPR(R23)(r1)
        stw r24, HOST_NV_GPR(R24)(r1)
        stw r25, HOST_NV_GPR(R25)(r1)
        stw r26, HOST_NV_GPR(R26)(r1)
        stw r27, HOST_NV_GPR(R27)(r1)
        stw r28, HOST_NV_GPR(R28)(r1)
        stw r29, HOST_NV_GPR(R29)(r1)
        stw r30, HOST_NV_GPR(R30)(r1)
        stw r31, HOST_NV_GPR(R31)(r1)

        /* Load guest non-volatiles. */
        lwz r14, VCPU_GPR(R14)(r4)
        lwz r15, VCPU_GPR(R15)(r4)
        lwz r16, VCPU_GPR(R16)(r4)
        lwz r17, VCPU_GPR(R17)(r4)
        lwz r18, VCPU_GPR(R18)(r4)
        lwz r19, VCPU_GPR(R19)(r4)
        lwz r20, VCPU_GPR(R20)(r4)
        lwz r21, VCPU_GPR(R21)(r4)
        lwz r22, VCPU_GPR(R22)(r4)
        lwz r23, VCPU_GPR(R23)(r4)
        lwz r24, VCPU_GPR(R24)(r4)
        lwz r25, VCPU_GPR(R25)(r4)
        lwz r26, VCPU_GPR(R26)(r4)
        lwz r27, VCPU_GPR(R27)(r4)
        lwz r28, VCPU_GPR(R28)(r4)
        lwz r29, VCPU_GPR(R29)(r4)
        lwz r30, VCPU_GPR(R30)(r4)
        lwz r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
        /* save host SPEFSCR and load guest SPEFSCR */
        mfspr r3, SPRN_SPEFSCR
        stw r3, VCPU_HOST_SPEFSCR(r4)
        lwz r3, VCPU_SPEFSCR(r4)
        mtspr SPRN_SPEFSCR, r3
#endif

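/* lightweight_exit: re-enter the current guest without returning to the
 * host. Reached by falling through from __kvmppc_vcpu_run above, or from
 * kvmppc_resume_host when kvmppc_handle_exit() asks to resume the guest. */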
lightweight_exit:
        stw r2, HOST_R2(r1)

        mfspr r3, SPRN_PID
        stw r3, VCPU_HOST_PID(r4)
        lwz r3, VCPU_SHADOW_PID(r4)
        mtspr SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        lwz r3, VCPU_SHADOW_PID1(r4)
        mtspr SPRN_PID1, r3
#endif

        /* Load some guest volatiles. */
        lwz r0, VCPU_GPR(R0)(r4)
        lwz r2, VCPU_GPR(R2)(r4)
        lwz r9, VCPU_GPR(R9)(r4)
        lwz r10, VCPU_GPR(R10)(r4)
        lwz r11, VCPU_GPR(R11)(r4)
        lwz r12, VCPU_GPR(R12)(r4)
        lwz r13, VCPU_GPR(R13)(r4)
        lwz r3, VCPU_LR(r4)
        mtlr r3
        lwz r3, VCPU_XER(r4)
        mtxer r3

        /* Switch the IVPR. XXX If we take a TLB miss after this we're
         * screwed, so how do we make sure vcpu won't fault? */
        lis r8, kvmppc_booke_handlers@ha
        lwz r8, kvmppc_booke_handlers@l(r8)
        mtspr SPRN_IVPR, r8

        lwz r5, VCPU_SHARED(r4)

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz r1, VCPU_GPR(R1)(r4)

        /*
         * Host interrupt handlers may have clobbered these
         * guest-readable SPRGs, or the guest kernel may have
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
        PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr SPRN_SPRG4W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr SPRN_SPRG5W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr SPRN_SPRG6W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr SPRN_SPRG7W, r3

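        /* Record entry time with the same rollover-safe timebase read as
         * on the exit path. */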
#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
1:
        mfspr r6, SPRN_TBRU
        mfspr r7, SPRN_TBRL
        mfspr r8, SPRN_TBRU
        cmpw r8, r6
        bne 1b
        stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz r3, VCPU_CTR(r4)
        lwz r5, VCPU_CR(r4)
        lwz r6, VCPU_PC(r4)
        lwz r7, VCPU_SHADOW_MSR(r4)
        mtctr r3
        mtcr r5
        mtsrr0 r6
        mtsrr1 r7
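        /* SRR0/SRR1 now hold the guest PC and shadow MSR; the rfi below
         * switches to them atomically. */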
        lwz r5, VCPU_GPR(R5)(r4)
        lwz r6, VCPU_GPR(R6)(r4)
        lwz r7, VCPU_GPR(R7)(r4)
        lwz r8, VCPU_GPR(R8)(r4)

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis r3, 0xffff
        ori r3, r3, 0xffff
        mtspr SPRN_DBSR, r3

        lwz r3, VCPU_GPR(R3)(r4)
        lwz r4, VCPU_GPR(R4)(r4)
        rfi

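/* Address of each handler stub above, one word per entry, in the same order
 * as the stubs themselves. The end marker stays last, presumably so the
 * consumer can size each stub from consecutive entries. */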
.data
.align 4
.globl kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this at the end. */

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
        cmpi 0, r3, 0
        beqlr-
        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
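        /* The SPE accumulator has no mfspr; multiply zero by zero and add
         * ACC (evmwumiaa) to copy its value into evr6. */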
        evxor evr6, evr6, evr6
        evmwumiaa evr6, evr6, evr6
        li r4, VCPU_ACC
        evstddx evr6, r4, r3 /* save acc */
        blr

_GLOBAL(kvmppc_load_guest_spe)
        cmpi 0, r3, 0
        beqlr-
        li r4, VCPU_ACC
        evlddx evr6, r4, r3
        evmra evr6, evr6 /* load acc */
        REST_32EVRS(0, r4, r3, VCPU_EVR)
        blr
#endif