/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_NV_GPRS    16
#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
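/* Worked example of the layout above: HOST_NV_GPR(31) = 16 + (31 - 14) * 4
 * = 84, so HOST_MIN_STACK_SIZE = 88, which rounds up to a 16-byte-aligned
 * HOST_STACK_SIZE of 96 bytes. */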
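/* Exit types in these masks need volatile fault state (the last guest
 * instruction, DEAR, ESR) captured before host code can clobber it; see
 * the save paths in kvmppc_resume_host below. */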
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK  ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))
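/* Each stub below stashes one scratch GPR in an SPRG, recovers the vcpu
 * pointer, records the exit number, and jumps to the common
 * kvmppc_resume_host code. The jump goes through CTR with an absolute
 * address (presumably because the stubs are copied out of place: IVPR is
 * later pointed at kvmppc_booke_handlers, so a PC-relative branch would
 * not survive the copy). */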
.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
        /* Get pointer to vcpu and record exit number. */
        mtspr   SPRN_SPRG_WSCRATCH0, r4
        mfspr   r4, SPRN_SPRG_RVCPU
        stw     r5, VCPU_GPR(r5)(r4)
        stw     r6, VCPU_GPR(r6)(r4)
        mfctr   r5
        lis     r6, kvmppc_resume_host@h
        stw     r5, VCPU_CTR(r4)
        li      r5, \ivor_nr
        ori     r6, r6, kvmppc_resume_host@l
        mtctr   r6
        bctr
.endm

_GLOBAL(kvmppc_handlers_start)
        KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
        KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
        KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
        KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
        KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
        KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
        KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
        KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
        KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
        KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
        KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
        KVM_HANDLER BOOKE_INTERRUPT_FIT
        KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
        KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
        KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
        KVM_HANDLER BOOKE_INTERRUPT_DEBUG
        KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
        KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
        KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND
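/* Every KVM_HANDLER expansion assembles to the same number of instructions,
 * so the distance between the first two handlers gives a per-vector stride
 * that setup code can use to locate each stub. */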
_GLOBAL(kvmppc_handler_len)
        .long kvmppc_handler_1 - kvmppc_handler_0


/* Registers:
 * SPRG_SCRATCH0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        stw     r3, VCPU_GPR(r3)(r4)
        mfcr    r3
        stw     r3, VCPU_CR(r4)
        stw     r7, VCPU_GPR(r7)(r4)
        stw     r8, VCPU_GPR(r8)(r4)
        stw     r9, VCPU_GPR(r9)(r4)

        /* r6 = 1 << exit_nr, for testing against the NEED_* masks. */
        li      r6, 1
        slw     r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
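        /* Read TBU, then TBL, then TBU again; if the upper half changed
         * between the two reads, TBL wrapped, so retry until the pair is
         * coherent. */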
1:
        mfspr   r7, SPRN_TBRU
        mfspr   r8, SPRN_TBRL
        mfspr   r9, SPRN_TBRU
        cmpw    r9, r7
        bne     1b
        stw     r8, VCPU_TIMING_EXIT_TBL(r4)
        stw     r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        /* Save the faulting instruction and all GPRs for emulation. */
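        /* The guest runs in the other address space, so to read the
         * faulting instruction as data we briefly set MSR[DS], do the
         * load, then restore the original MSR. */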
        andi.   r7, r6, NEED_INST_MASK
        beq     ..skip_inst_copy
        mfspr   r9, SPRN_SRR0
        mfmsr   r8
        ori     r7, r8, MSR_DS
        mtmsr   r7
        isync
        lwz     r9, 0(r9)
        mtmsr   r8
        isync
        stw     r9, VCPU_LAST_INST(r4)

        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi.   r7, r6, NEED_DEAR_MASK
        beq     ..skip_dear
        mfspr   r9, SPRN_DEAR
        stw     r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi.   r7, r6, NEED_ESR_MASK
        beq     ..skip_esr
        mfspr   r9, SPRN_ESR
        stw     r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw     r0, VCPU_GPR(r0)(r4)
        stw     r1, VCPU_GPR(r1)(r4)
        stw     r2, VCPU_GPR(r2)(r4)
        stw     r10, VCPU_GPR(r10)(r4)
        stw     r11, VCPU_GPR(r11)(r4)
        stw     r12, VCPU_GPR(r12)(r4)
        stw     r13, VCPU_GPR(r13)(r4)
        stw     r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
        mflr    r3
        stw     r3, VCPU_LR(r4)
        mfxer   r3
        stw     r3, VCPU_XER(r4)
        mfspr   r3, SPRN_SPRG_RSCRATCH0
        stw     r3, VCPU_GPR(r4)(r4)
        mfspr   r3, SPRN_SRR0
        stw     r3, VCPU_PC(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz     r1, VCPU_HOST_STACK(r4)
        lwz     r3, VCPU_HOST_PID(r4)
        mtspr   SPRN_PID, r3

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis     r3, 0xc000
        mtspr   SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr   r3
        lwz     r3, HOST_RUN(r1)
        lwz     r2, HOST_R2(r1)
        mr      r14, r4 /* Save vcpu pointer. */

        bctrl   /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr      r4, r14
        lwz     r14, VCPU_GPR(r14)(r4)

        /* Sometimes instruction emulation must restore complete GPR state. */
        andi.   r5, r3, RESUME_FLAG_NV
        beq     ..skip_nv_load
        lwz     r15, VCPU_GPR(r15)(r4)
        lwz     r16, VCPU_GPR(r16)(r4)
        lwz     r17, VCPU_GPR(r17)(r4)
        lwz     r18, VCPU_GPR(r18)(r4)
        lwz     r19, VCPU_GPR(r19)(r4)
        lwz     r20, VCPU_GPR(r20)(r4)
        lwz     r21, VCPU_GPR(r21)(r4)
        lwz     r22, VCPU_GPR(r22)(r4)
        lwz     r23, VCPU_GPR(r23)(r4)
        lwz     r24, VCPU_GPR(r24)(r4)
        lwz     r25, VCPU_GPR(r25)(r4)
        lwz     r26, VCPU_GPR(r26)(r4)
        lwz     r27, VCPU_GPR(r27)(r4)
        lwz     r28, VCPU_GPR(r28)(r4)
        lwz     r29, VCPU_GPR(r29)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

        /* Should we return to the guest? */
        andi.   r5, r3, RESUME_FLAG_HOST
        beq     lightweight_exit
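        /* kvmppc_handle_exit() returns an error code shifted left by two,
         * with RESUME_FLAG_* in the low bits; shift the flags back out to
         * recover the value to hand back to kvm_vcpu_run(). */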
        srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

#ifdef CONFIG_SPE
        /* save guest SPEFSCR and load host SPEFSCR */
        mfspr   r9, SPRN_SPEFSCR
        stw     r9, VCPU_SPEFSCR(r4)
        lwz     r9, VCPU_HOST_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r9
#endif

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz     r14, HOST_NV_GPR(r14)(r1)
        lwz     r15, HOST_NV_GPR(r15)(r1)
        lwz     r16, HOST_NV_GPR(r16)(r1)
        lwz     r17, HOST_NV_GPR(r17)(r1)
        lwz     r18, HOST_NV_GPR(r18)(r1)
        lwz     r19, HOST_NV_GPR(r19)(r1)
        lwz     r20, HOST_NV_GPR(r20)(r1)
        lwz     r21, HOST_NV_GPR(r21)(r1)
        lwz     r22, HOST_NV_GPR(r22)(r1)
        lwz     r23, HOST_NV_GPR(r23)(r1)
        lwz     r24, HOST_NV_GPR(r24)(r1)
        lwz     r25, HOST_NV_GPR(r25)(r1)
        lwz     r26, HOST_NV_GPR(r26)(r1)
        lwz     r27, HOST_NV_GPR(r27)(r1)
        lwz     r28, HOST_NV_GPR(r28)(r1)
        lwz     r29, HOST_NV_GPR(r29)(r1)
        lwz     r30, HOST_NV_GPR(r30)(r1)
        lwz     r31, HOST_NV_GPR(r31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz     r4, HOST_STACK_LR(r1)
        addi    r1, r1, HOST_STACK_SIZE
        mtlr    r4
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr

/* Registers:
 * r3: kvm_run pointer
 * r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu    r1, -HOST_STACK_SIZE(r1)
        stw     r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw     r3, HOST_RUN(r1)
        mflr    r3
        stw     r3, HOST_STACK_LR(r1)

        /* Save host non-volatile register state to stack. */
        stw     r14, HOST_NV_GPR(r14)(r1)
        stw     r15, HOST_NV_GPR(r15)(r1)
        stw     r16, HOST_NV_GPR(r16)(r1)
        stw     r17, HOST_NV_GPR(r17)(r1)
        stw     r18, HOST_NV_GPR(r18)(r1)
        stw     r19, HOST_NV_GPR(r19)(r1)
        stw     r20, HOST_NV_GPR(r20)(r1)
        stw     r21, HOST_NV_GPR(r21)(r1)
        stw     r22, HOST_NV_GPR(r22)(r1)
        stw     r23, HOST_NV_GPR(r23)(r1)
        stw     r24, HOST_NV_GPR(r24)(r1)
        stw     r25, HOST_NV_GPR(r25)(r1)
        stw     r26, HOST_NV_GPR(r26)(r1)
        stw     r27, HOST_NV_GPR(r27)(r1)
        stw     r28, HOST_NV_GPR(r28)(r1)
        stw     r29, HOST_NV_GPR(r29)(r1)
        stw     r30, HOST_NV_GPR(r30)(r1)
        stw     r31, HOST_NV_GPR(r31)(r1)

        /* Load guest non-volatiles. */
        lwz     r14, VCPU_GPR(r14)(r4)
        lwz     r15, VCPU_GPR(r15)(r4)
        lwz     r16, VCPU_GPR(r16)(r4)
        lwz     r17, VCPU_GPR(r17)(r4)
        lwz     r18, VCPU_GPR(r18)(r4)
        lwz     r19, VCPU_GPR(r19)(r4)
        lwz     r20, VCPU_GPR(r20)(r4)
        lwz     r21, VCPU_GPR(r21)(r4)
        lwz     r22, VCPU_GPR(r22)(r4)
        lwz     r23, VCPU_GPR(r23)(r4)
        lwz     r24, VCPU_GPR(r24)(r4)
        lwz     r25, VCPU_GPR(r25)(r4)
        lwz     r26, VCPU_GPR(r26)(r4)
        lwz     r27, VCPU_GPR(r27)(r4)
        lwz     r28, VCPU_GPR(r28)(r4)
        lwz     r29, VCPU_GPR(r29)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)

#ifdef CONFIG_SPE
        /* save host SPEFSCR and load guest SPEFSCR */
        mfspr   r3, SPRN_SPEFSCR
        stw     r3, VCPU_HOST_SPEFSCR(r4)
        lwz     r3, VCPU_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r3
#endif

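/* Reached both by falling through from __kvmppc_vcpu_run above (first entry)
 * and from kvmppc_resume_host when RESUME_FLAG_HOST is clear: only guest
 * volatile state needs (re)loading before returning to the guest. */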
lightweight_exit:
        stw     r2, HOST_R2(r1)

        mfspr   r3, SPRN_PID
        stw     r3, VCPU_HOST_PID(r4)
        lwz     r3, VCPU_SHADOW_PID(r4)
        mtspr   SPRN_PID, r3

#ifdef CONFIG_44x
        iccci   0, 0 /* XXX hack */
#endif

        /* Load some guest volatiles. */
        lwz     r0, VCPU_GPR(r0)(r4)
        lwz     r2, VCPU_GPR(r2)(r4)
        lwz     r9, VCPU_GPR(r9)(r4)
        lwz     r10, VCPU_GPR(r10)(r4)
        lwz     r11, VCPU_GPR(r11)(r4)
        lwz     r12, VCPU_GPR(r12)(r4)
        lwz     r13, VCPU_GPR(r13)(r4)
        lwz     r3, VCPU_LR(r4)
        mtlr    r3
        lwz     r3, VCPU_XER(r4)
        mtxer   r3

        /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
         * so how do we make sure vcpu won't fault? */
        lis     r8, kvmppc_booke_handlers@ha
        lwz     r8, kvmppc_booke_handlers@l(r8)
        mtspr   SPRN_IVPR, r8

        /* Save vcpu pointer for the exception handlers. */
        mtspr   SPRN_SPRG_WVCPU, r4

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz     r1, VCPU_GPR(r1)(r4)

        /* Host interrupt handlers may have clobbered these guest-readable
         * SPRGs, so we need to reload them here with the guest's values. */
        lwz     r3, VCPU_SPRG4(r4)
        mtspr   SPRN_SPRG4W, r3
        lwz     r3, VCPU_SPRG5(r4)
        mtspr   SPRN_SPRG5W, r3
        lwz     r3, VCPU_SPRG6(r4)
        mtspr   SPRN_SPRG6W, r3
        lwz     r3, VCPU_SPRG7(r4)
        mtspr   SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
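        /* Same coherent timebase read as on the exit path: re-read TBU to
         * detect a TBL wrap between the two reads. */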
1:
        mfspr   r6, SPRN_TBRU
        mfspr   r7, SPRN_TBRL
        mfspr   r8, SPRN_TBRU
        cmpw    r8, r6
        bne     1b
        stw     r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz     r3, VCPU_CTR(r4)
        lwz     r5, VCPU_CR(r4)
        lwz     r6, VCPU_PC(r4)
        lwz     r7, VCPU_SHADOW_MSR(r4)
        mtctr   r3
        mtcr    r5
        mtsrr0  r6
        mtsrr1  r7
        lwz     r5, VCPU_GPR(r5)(r4)
        lwz     r6, VCPU_GPR(r6)(r4)
        lwz     r7, VCPU_GPR(r7)(r4)
        lwz     r8, VCPU_GPR(r8)(r4)

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis     r3, 0xffff
        ori     r3, r3, 0xffff
        mtspr   SPRN_DBSR, r3

        lwz     r3, VCPU_GPR(r3)(r4)
        lwz     r4, VCPU_GPR(r4)(r4)
        rfi

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
        cmpi    0, r3, 0
        beqlr-
        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
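        /* There is no instruction to read the SPE accumulator directly, so
         * multiply zero by zero and accumulate: evmwumiaa deposits ACC + 0
         * into evr6, which is then stored. */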
        evxor   evr6, evr6, evr6
        evmwumiaa evr6, evr6, evr6
        li      r4, VCPU_ACC
        evstddx evr6, r4, r3 /* save acc */
        blr

_GLOBAL(kvmppc_load_guest_spe)
        cmpi    0, r3, 0
        beqlr-
        li      r4, VCPU_ACC
        evlddx  evr6, r4, r3
        evmra   evr6, evr6 /* load acc */
        REST_32EVRS(0, r4, r3, VCPU_EVR)
        blr
#endif