/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 *
 * Author: Varun Sethi <varun.sethi@freescale.com>
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/thread_info.h>

#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */

#define GET_VCPU(vcpu, thread)	\
	PPC_LL	vcpu, THREAD_KVM_VCPU(thread)

#define LONGBYTES		(BITS_PER_LONG / 8)

#define VCPU_GPR(n)		(VCPU_GPRS + (n * LONGBYTES))
#define VCPU_GUEST_SPRG(n)	(VCPU_GUEST_SPRGS + (n * LONGBYTES))
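/*
 * Expansion example (assuming ppc_asm.h's numeric register defines,
 * where e.g. r14 expands to the literal 14): VCPU_GPR(r14) becomes
 * (VCPU_GPRS + (14 * LONGBYTES)), the byte offset of saved GPR14
 * within the vcpu struct.
 */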

/* The host stack layout: */
#define HOST_R1		(0 * LONGBYTES) /* Implied by stwu. */
#define HOST_CALLEE_LR	(1 * LONGBYTES)
#define HOST_RUN	(2 * LONGBYTES) /* struct kvm_run */
/*
 * r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option.
 */
#define HOST_R2		(3 * LONGBYTES)
#define HOST_CR		(4 * LONGBYTES)
#define HOST_NV_GPRS	(5 * LONGBYTES)
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * LONGBYTES))
#define HOST_MIN_STACK_SIZE	(HOST_NV_GPR(31) + LONGBYTES)
#define HOST_STACK_SIZE	((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
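/*
 * Worked example of the rounding above, on 32-bit (LONGBYTES == 4):
 * HOST_NV_GPR(31) = 20 + (31 - 14) * 4 = 88, so HOST_MIN_STACK_SIZE = 92,
 * and (92 + 15) & ~15 = 96, the next multiple of 16.
 */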

#define NEED_EMU	0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR	0x00000002 /* save faulting DEAR */
#define NEED_ESR	0x00000004 /* save faulting ESR */

/*
 * On entry:
 * r4 = vcpu, r5 = srr0, r6 = srr1
 * saved in vcpu: cr, ctr, r3-r13
 */
.macro kvm_handler_common intno, srr0, flags
	/* Restore host stack pointer */
	PPC_STL	r1, VCPU_GPR(r1)(r4)
	PPC_STL	r2, VCPU_GPR(r2)(r4)
	PPC_LL	r1, VCPU_HOST_STACK(r4)
	PPC_LL	r2, HOST_R2(r1)

	mfspr	r10, SPRN_PID
	lwz	r8, VCPU_HOST_PID(r4)
	PPC_LL	r11, VCPU_SHARED(r4)
	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
	li	r14, \intno

	stw	r10, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r8

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
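	/*
	 * Read TBU, then TBL, then TBU again, and retry if the upper
	 * word changed in between, so a low-word carry cannot produce a
	 * torn 64-bit timebase sample on a 32-bit read sequence.
	 */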
1:	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	PPC_STL	r8, VCPU_TIMING_EXIT_TBL(r4)
	bne-	1b
	PPC_STL	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	oris	r8, r6, MSR_CE@h
#ifdef CONFIG_64BIT
	std	r6, (VCPU_SHARED_MSR)(r11)
#else
	stw	r6, (VCPU_SHARED_MSR + 4)(r11)
#endif
	ori	r8, r8, MSR_ME | MSR_RI
	PPC_STL	r5, VCPU_PC(r4)

	/*
	 * Make sure CE/ME/RI are set (if appropriate for exception type)
	 * whether or not the guest had them set.  Since mfmsr/mtmsr are
	 * somewhat expensive, skip in the common case where the guest
	 * had all these bits set (and thus they're still set if
	 * appropriate for the exception type).
	 */
	cmpw	r6, r8
	beq	1f
	mfmsr	r7
	.if	\srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
	oris	r7, r7, MSR_CE@h
	.endif
	.if	\srr0 != SPRN_MCSRR0
	ori	r7, r7, MSR_ME | MSR_RI
	.endif
	mtmsr	r7
1:
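	/*
	 * The .if guards above track the exception level: CE stays
	 * masked when we came in through CSRR0/1 or MCSRR0/1, and ME/RI
	 * stay masked for machine checks, so that a nested exception of
	 * the same level cannot clobber the still-live save/restore
	 * registers.
	 */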

	.if	\flags & NEED_EMU
	/*
	 * This assumes you have external PID support.
	 * To support a bookehv CPU without external PID, you'll
	 * need to look up the TLB entry and create a temporary mapping.
	 *
	 * FIXME: we don't currently handle the case where the lwepx
	 * faults.  PR-mode booke doesn't handle it either.  Since Linux
	 * doesn't use broadcast tlbivax anymore, the only way this should
	 * happen is if the guest maps its memory execute-but-not-read, or
	 * if we somehow take a TLB miss in the middle of this entry code
	 * and evict the relevant entry.  On e500mc, all kernel lowmem is
	 * bolted into TLB1 large page mappings, and we don't use
	 * broadcast invalidates, so we should not take a TLB miss here.
	 *
	 * Later we'll need to deal with faults here.  Disallowing guest
	 * mappings that are execute-but-not-read could be an option on
	 * e500mc, but not on chips with an LRAT if it is used.
	 */

	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
	PPC_STL	r15, VCPU_GPR(r15)(r4)
	PPC_STL	r16, VCPU_GPR(r16)(r4)
	PPC_STL	r17, VCPU_GPR(r17)(r4)
	PPC_STL	r18, VCPU_GPR(r18)(r4)
	PPC_STL	r19, VCPU_GPR(r19)(r4)
	mr	r8, r3
	PPC_STL	r20, VCPU_GPR(r20)(r4)
	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
	PPC_STL	r21, VCPU_GPR(r21)(r4)
	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
	PPC_STL	r22, VCPU_GPR(r22)(r4)
	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
	PPC_STL	r23, VCPU_GPR(r23)(r4)
	PPC_STL	r24, VCPU_GPR(r24)(r4)
	PPC_STL	r25, VCPU_GPR(r25)(r4)
	PPC_STL	r26, VCPU_GPR(r26)(r4)
	PPC_STL	r27, VCPU_GPR(r27)(r4)
	PPC_STL	r28, VCPU_GPR(r28)(r4)
	PPC_STL	r29, VCPU_GPR(r29)(r4)
	PPC_STL	r30, VCPU_GPR(r30)(r4)
	PPC_STL	r31, VCPU_GPR(r31)(r4)
	mtspr	SPRN_EPLC, r8
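	/*
	 * The rlwimi sequence above merged the guest's instruction
	 * address space (MSR[IR]), privilege (MSR[PR]), and PID into
	 * EPLC, so the lwepx below translates the fetch exactly as the
	 * guest would have.
	 */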

	/* disable preemption, so we are sure we hit the fixup handler */
#ifdef CONFIG_PPC64
	clrrdi	r8, r1, THREAD_SHIFT
#else
	rlwinm	r8, r1, 0, 0, 31 - THREAD_SHIFT	/* current thread_info */
#endif
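	/*
	 * thread_info lives at the base of the kernel stack, so clearing
	 * the low THREAD_SHIFT bits of r1 (e.g. 13 bits for an 8 KiB
	 * stack) yields the current thread_info pointer.
	 */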
	li	r7, 1
	stw	r7, TI_PREEMPT(r8)

	isync

	/*
	 * In case the read goes wrong, we catch it and write an invalid value
	 * in LAST_INST instead.
	 */
1:	lwepx	r9, 0, r5
2:
.section .fixup, "ax"
3:	li	r9, KVM_INST_FETCH_FAILED
	b	2b
	.previous
.section __ex_table,"a"
	PPC_LONG_ALIGN
	PPC_LONG 1b,3b
	.previous
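	/*
	 * The __ex_table entry pairs the faulting lwepx at 1: with the
	 * fixup at 3:, so a fault during the external-PID load is caught
	 * and reported as KVM_INST_FETCH_FAILED instead of oopsing.
	 */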

	mtspr	SPRN_EPLC, r3
	li	r7, 0
	stw	r7, TI_PREEMPT(r8)
	stw	r9, VCPU_LAST_INST(r4)
	.endif

	.if	\flags & NEED_ESR
	mfspr	r8, SPRN_ESR
	PPC_STL	r8, VCPU_FAULT_ESR(r4)
	.endif

	.if	\flags & NEED_DEAR
	mfspr	r9, SPRN_DEAR
	PPC_STL	r9, VCPU_FAULT_DEAR(r4)
	.endif

	b	kvmppc_resume_host
.endm

/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	GET_VCPU(r11, r10)
	PPC_STL	r3, VCPU_GPR(r3)(r11)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	PPC_STL	r4, VCPU_GPR(r4)(r11)
	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
	PPC_STL	r5, VCPU_GPR(r5)(r11)
	PPC_STL	r13, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(r10)(r11)
	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
	PPC_STL	r6, VCPU_GPR(r6)(r11)
	PPC_STL	r4, VCPU_GPR(r11)(r11)
	mfspr	r6, \srr1
	PPC_STL	r7, VCPU_GPR(r7)(r11)
	PPC_STL	r8, VCPU_GPR(r8)(r11)
	PPC_STL	r9, VCPU_GPR(r9)(r11)
	PPC_STL	r3, VCPU_GPR(r13)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(r12)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm

.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mfspr	r10, SPRN_SPRG_THREAD
	GET_VCPU(r11, r10)
	PPC_STL	r3, VCPU_GPR(r3)(r11)
	mfspr	r3, \scratch
	PPC_STL	r4, VCPU_GPR(r4)(r11)
	PPC_LL	r4, GPR9(r8)
	PPC_STL	r5, VCPU_GPR(r5)(r11)
	PPC_STL	r9, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(r8)(r11)
	PPC_LL	r3, GPR10(r8)
	PPC_STL	r6, VCPU_GPR(r6)(r11)
	PPC_STL	r4, VCPU_GPR(r9)(r11)
	mfspr	r6, \srr1
	PPC_LL	r4, GPR11(r8)
	PPC_STL	r7, VCPU_GPR(r7)(r11)
	PPC_STL	r8, VCPU_GPR(r8)(r11)
	PPC_STL	r3, VCPU_GPR(r10)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(r12)(r11)
	PPC_STL	r4, VCPU_GPR(r11)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm
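/*
 * kvm_lvl_handler is the variant for level exceptions (critical,
 * machine check, debug): as the GPR9..GPR11 loads via r8 above show,
 * the level-exception prologue (see kvm_booke_hv_asm.h) has already
 * spilled r9-r11 to a frame addressed by r8, and each level uses its
 * own scratch SPRG.
 */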

kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0


/* Registers:
 *  SPRG_SCRATCH0: guest r10
 *  r4: vcpu pointer
 *  r11: vcpu->arch.shared
 *  r14: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	/* Save remaining volatile guest register state to vcpu. */
	mfspr	r3, SPRN_VRSAVE
	PPC_STL	r0, VCPU_GPR(r0)(r4)
	mflr	r5
	mfspr	r6, SPRN_SPRG4
	PPC_STL	r5, VCPU_LR(r4)
	mfspr	r7, SPRN_SPRG5
	PPC_STL	r3, VCPU_VRSAVE(r4)
	PPC_STL	r6, VCPU_SHARED_SPRG4(r11)
	mfspr	r8, SPRN_SPRG6
	PPC_STL	r7, VCPU_SHARED_SPRG5(r11)
	mfspr	r9, SPRN_SPRG7
	PPC_STL	r8, VCPU_SHARED_SPRG6(r11)
	mfxer	r3
	PPC_STL	r9, VCPU_SHARED_SPRG7(r11)

	/* save guest MAS registers and restore host mas4 & mas6 */
	mfspr	r5, SPRN_MAS0
	PPC_STL	r3, VCPU_XER(r4)
	mfspr	r6, SPRN_MAS1
	stw	r5, VCPU_SHARED_MAS0(r11)
	mfspr	r7, SPRN_MAS2
	stw	r6, VCPU_SHARED_MAS1(r11)
#ifdef CONFIG_64BIT
	std	r7, (VCPU_SHARED_MAS2)(r11)
#else
	stw	r7, (VCPU_SHARED_MAS2 + 4)(r11)
#endif
	mfspr	r5, SPRN_MAS3
	mfspr	r6, SPRN_MAS4
	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
	mfspr	r7, SPRN_MAS6
	stw	r6, VCPU_SHARED_MAS4(r11)
	mfspr	r5, SPRN_MAS7
	lwz	r6, VCPU_HOST_MAS4(r4)
	stw	r7, VCPU_SHARED_MAS6(r11)
	lwz	r8, VCPU_HOST_MAS6(r4)
	mtspr	SPRN_MAS4, r6
	stw	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r8
	/* Enable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	rlwinm	r3, r3, 0, ~SPRN_EPCR_DMIUH
	mtspr	SPRN_EPCR, r3
	isync
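	/*
	 * EPCR[DMIUH] suppresses MAS updates for exceptions taken in
	 * hypervisor state; it was set on guest entry so host TLB misses
	 * could not clobber the guest's live MAS values, and is cleared
	 * again now that those values have been saved.
	 */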

	/* Switch to kernel stack and jump to handler. */
	PPC_LL	r3, HOST_RUN(r1)
	mr	r5, r14 /* intno */
	mr	r14, r4 /* Save vcpu pointer. */
	bl	kvmppc_handle_exit

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	PPC_LL	r14, VCPU_GPR(r14)(r4)

	andi.	r5, r3, RESUME_FLAG_NV
	beq	skip_nv_load
	PPC_LL	r15, VCPU_GPR(r15)(r4)
	PPC_LL	r16, VCPU_GPR(r16)(r4)
	PPC_LL	r17, VCPU_GPR(r17)(r4)
	PPC_LL	r18, VCPU_GPR(r18)(r4)
	PPC_LL	r19, VCPU_GPR(r19)(r4)
	PPC_LL	r20, VCPU_GPR(r20)(r4)
	PPC_LL	r21, VCPU_GPR(r21)(r4)
	PPC_LL	r22, VCPU_GPR(r22)(r4)
	PPC_LL	r23, VCPU_GPR(r23)(r4)
	PPC_LL	r24, VCPU_GPR(r24)(r4)
	PPC_LL	r25, VCPU_GPR(r25)(r4)
	PPC_LL	r26, VCPU_GPR(r26)(r4)
	PPC_LL	r27, VCPU_GPR(r27)(r4)
	PPC_LL	r28, VCPU_GPR(r28)(r4)
	PPC_LL	r29, VCPU_GPR(r29)(r4)
	PPC_LL	r30, VCPU_GPR(r30)(r4)
	PPC_LL	r31, VCPU_GPR(r31)(r4)
skip_nv_load:
	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */
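	/*
	 * kvmppc_handle_exit() packs the RESUME_FLAG_* bits into the low
	 * two bits of its return value; the arithmetic shift recovers
	 * the (negative) error code handed back to the caller.
	 */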

heavyweight_exit:
	/* Not returning to guest. */
	PPC_LL	r5, HOST_STACK_LR(r1)
	lwz	r6, HOST_CR(r1)

	/*
	 * We already saved guest volatile register state; now save the
	 * non-volatiles.
	 */

	PPC_STL	r15, VCPU_GPR(r15)(r4)
	PPC_STL	r16, VCPU_GPR(r16)(r4)
	PPC_STL	r17, VCPU_GPR(r17)(r4)
	PPC_STL	r18, VCPU_GPR(r18)(r4)
	PPC_STL	r19, VCPU_GPR(r19)(r4)
	PPC_STL	r20, VCPU_GPR(r20)(r4)
	PPC_STL	r21, VCPU_GPR(r21)(r4)
	PPC_STL	r22, VCPU_GPR(r22)(r4)
	PPC_STL	r23, VCPU_GPR(r23)(r4)
	PPC_STL	r24, VCPU_GPR(r24)(r4)
	PPC_STL	r25, VCPU_GPR(r25)(r4)
	PPC_STL	r26, VCPU_GPR(r26)(r4)
	PPC_STL	r27, VCPU_GPR(r27)(r4)
	PPC_STL	r28, VCPU_GPR(r28)(r4)
	PPC_STL	r29, VCPU_GPR(r29)(r4)
	PPC_STL	r30, VCPU_GPR(r30)(r4)
	PPC_STL	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
	PPC_LL	r15, HOST_NV_GPR(r15)(r1)
	PPC_LL	r16, HOST_NV_GPR(r16)(r1)
	PPC_LL	r17, HOST_NV_GPR(r17)(r1)
	PPC_LL	r18, HOST_NV_GPR(r18)(r1)
	PPC_LL	r19, HOST_NV_GPR(r19)(r1)
	PPC_LL	r20, HOST_NV_GPR(r20)(r1)
	PPC_LL	r21, HOST_NV_GPR(r21)(r1)
	PPC_LL	r22, HOST_NV_GPR(r22)(r1)
	PPC_LL	r23, HOST_NV_GPR(r23)(r1)
	PPC_LL	r24, HOST_NV_GPR(r24)(r1)
	PPC_LL	r25, HOST_NV_GPR(r25)(r1)
	PPC_LL	r26, HOST_NV_GPR(r26)(r1)
	PPC_LL	r27, HOST_NV_GPR(r27)(r1)
	PPC_LL	r28, HOST_NV_GPR(r28)(r1)
	PPC_LL	r29, HOST_NV_GPR(r29)(r1)
	PPC_LL	r30, HOST_NV_GPR(r30)(r1)
	PPC_LL	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	mtlr	r5
	mtcr	r6
	addi	r1, r1, HOST_STACK_SIZE
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	PPC_STL	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	PPC_STL	r3, HOST_RUN(r1)
	mflr	r3
	mfcr	r5
	PPC_STL	r3, HOST_STACK_LR(r1)

	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	PPC_STL	r14, HOST_NV_GPR(r14)(r1)
	PPC_STL	r15, HOST_NV_GPR(r15)(r1)
	PPC_STL	r16, HOST_NV_GPR(r16)(r1)
	PPC_STL	r17, HOST_NV_GPR(r17)(r1)
	PPC_STL	r18, HOST_NV_GPR(r18)(r1)
	PPC_STL	r19, HOST_NV_GPR(r19)(r1)
	PPC_STL	r20, HOST_NV_GPR(r20)(r1)
	PPC_STL	r21, HOST_NV_GPR(r21)(r1)
	PPC_STL	r22, HOST_NV_GPR(r22)(r1)
	PPC_STL	r23, HOST_NV_GPR(r23)(r1)
	PPC_STL	r24, HOST_NV_GPR(r24)(r1)
	PPC_STL	r25, HOST_NV_GPR(r25)(r1)
	PPC_STL	r26, HOST_NV_GPR(r26)(r1)
	PPC_STL	r27, HOST_NV_GPR(r27)(r1)
	PPC_STL	r28, HOST_NV_GPR(r28)(r1)
	PPC_STL	r29, HOST_NV_GPR(r29)(r1)
	PPC_STL	r30, HOST_NV_GPR(r30)(r1)
	PPC_STL	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	PPC_LL	r14, VCPU_GPR(r14)(r4)
	PPC_LL	r15, VCPU_GPR(r15)(r4)
	PPC_LL	r16, VCPU_GPR(r16)(r4)
	PPC_LL	r17, VCPU_GPR(r17)(r4)
	PPC_LL	r18, VCPU_GPR(r18)(r4)
	PPC_LL	r19, VCPU_GPR(r19)(r4)
	PPC_LL	r20, VCPU_GPR(r20)(r4)
	PPC_LL	r21, VCPU_GPR(r21)(r4)
	PPC_LL	r22, VCPU_GPR(r22)(r4)
	PPC_LL	r23, VCPU_GPR(r23)(r4)
	PPC_LL	r24, VCPU_GPR(r24)(r4)
	PPC_LL	r25, VCPU_GPR(r25)(r4)
	PPC_LL	r26, VCPU_GPR(r26)(r4)
	PPC_LL	r27, VCPU_GPR(r27)(r4)
	PPC_LL	r28, VCPU_GPR(r28)(r4)
	PPC_LL	r29, VCPU_GPR(r29)(r4)
	PPC_LL	r30, VCPU_GPR(r30)(r4)
	PPC_LL	r31, VCPU_GPR(r31)(r4)

lightweight_exit:
	PPC_STL	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r3

	PPC_LL	r11, VCPU_SHARED(r4)
	/* Disable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	oris	r3, r3, SPRN_EPCR_DMIUH@h
	mtspr	SPRN_EPCR, r3
	isync
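	/*
	 * From here until the rfi the guest's MAS values are live in the
	 * hardware; DMIUH keeps any intervening host TLB miss from
	 * overwriting them.
	 */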
	/* Save host mas4 and mas6 and load guest MAS registers */
	mfspr	r3, SPRN_MAS4
	stw	r3, VCPU_HOST_MAS4(r4)
	mfspr	r3, SPRN_MAS6
	stw	r3, VCPU_HOST_MAS6(r4)
	lwz	r3, VCPU_SHARED_MAS0(r11)
	lwz	r5, VCPU_SHARED_MAS1(r11)
#ifdef CONFIG_64BIT
	ld	r6, (VCPU_SHARED_MAS2)(r11)
#else
	lwz	r6, (VCPU_SHARED_MAS2 + 4)(r11)
#endif
	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
	lwz	r8, VCPU_SHARED_MAS4(r11)
	mtspr	SPRN_MAS0, r3
	mtspr	SPRN_MAS1, r5
	mtspr	SPRN_MAS2, r6
	mtspr	SPRN_MAS3, r7
	mtspr	SPRN_MAS4, r8
	lwz	r3, VCPU_SHARED_MAS6(r11)
	lwz	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r3
	mtspr	SPRN_MAS7, r5

	/*
	 * Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values.
	 */
	lwz	r3, VCPU_VRSAVE(r4)
	lwz	r5, VCPU_SHARED_SPRG4(r11)
	mtspr	SPRN_VRSAVE, r3
	lwz	r6, VCPU_SHARED_SPRG5(r11)
	mtspr	SPRN_SPRG4W, r5
	lwz	r7, VCPU_SHARED_SPRG6(r11)
	mtspr	SPRN_SPRG5W, r6
	lwz	r8, VCPU_SHARED_SPRG7(r11)
	mtspr	SPRN_SPRG6W, r7
	mtspr	SPRN_SPRG7W, r8

	/* Load some guest volatiles. */
	PPC_LL	r3, VCPU_LR(r4)
	PPC_LL	r5, VCPU_XER(r4)
	PPC_LL	r6, VCPU_CTR(r4)
	PPC_LL	r7, VCPU_CR(r4)
	PPC_LL	r8, VCPU_PC(r4)
#ifdef CONFIG_64BIT
	ld	r9, (VCPU_SHARED_MSR)(r11)
#else
	lwz	r9, (VCPU_SHARED_MSR + 4)(r11)
#endif
	PPC_LL	r0, VCPU_GPR(r0)(r4)
	PPC_LL	r1, VCPU_GPR(r1)(r4)
	PPC_LL	r2, VCPU_GPR(r2)(r4)
	PPC_LL	r10, VCPU_GPR(r10)(r4)
	PPC_LL	r11, VCPU_GPR(r11)(r4)
	PPC_LL	r12, VCPU_GPR(r12)(r4)
	PPC_LL	r13, VCPU_GPR(r13)(r4)
	mtlr	r3
	mtxer	r5
	mtctr	r6
	mtcr	r7
	mtsrr0	r8
	mtsrr1	r9
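	/* SRR0/SRR1 now hold the guest PC and MSR; the rfi below consumes them. */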

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	PPC_STL	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	bne	1b
	PPC_STL	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	PPC_LL	r5, VCPU_GPR(r5)(r4)
	PPC_LL	r6, VCPU_GPR(r6)(r4)
	PPC_LL	r7, VCPU_GPR(r7)(r4)
	PPC_LL	r8, VCPU_GPR(r8)(r4)
	PPC_LL	r9, VCPU_GPR(r9)(r4)

	PPC_LL	r3, VCPU_GPR(r3)(r4)
	PPC_LL	r4, VCPU_GPR(r4)(r4)
	rfi