/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

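/*
 * The real-mode entry/exit code can't chase arbitrary pointers, so the
 * shadow SLB and shadow vcpu state live in fixed per-cpu locations: the
 * PACA on 64-bit, the thread struct on 32-bit. vcpu_load copies the
 * vcpu's state there; vcpu_put copies it back and also forces any lazily
 * loaded FPU/Altivec/VSX guest state back into the vcpu struct.
 */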
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

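/*
 * Compute the MSR we actually run the guest with. The guest executes in
 * problem state, so only a few guest-controlled bits (FE0/FE1/SF/SE/BE/DE)
 * pass straight through; IR/DR/PR/EE/ME/RI are forced on for the host's
 * sake, and the external-provider bits are mirrored only while the guest
 * currently owns the corresponding unit.
 */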
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External provider (FPU/Altivec/VSX) bits the guest owns */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

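/*
 * Guest MSR writes are filtered through msr_mask and may have side
 * effects: setting MSR_POW with no interrupts pending puts the vcpu to
 * sleep (power-save emulation), and flipping any of PR/IR/DR invalidates
 * the shadow segment mappings, so they are flushed and the current PC
 * (plus the magic page, in kernel mode) is remapped eagerly.
 */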
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

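/*
 * The PVR tells us which CPU the guest wants to look like; it selects the
 * 32-bit or 64-bit MMU emulation, the default interrupt vector offset
 * (HIOR) and the writable MSR bits, plus a few per-platform quirks below.
 */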
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_sregs)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_sregs)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
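/*
 * dcbz encodes as 0x7c0007ec (primary opcode 31, extended opcode 1014);
 * RA and RB live in the middle bits that the 0xff0007ff match mask below
 * ignores. Clearing the 0x8 bit yields an opcode these CPUs don't
 * implement, so the patched instruction traps with a program interrupt,
 * and the 0x700 handler recognizes the masked form and emulates a
 * 32-byte dcbz (see the INS_DCBZ check in kvmppc_handle_exit).
 */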
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage)) {
		kvm_release_page_clean(hpage);
		return;
	}

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}

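/*
 * The magic (paravirt shared) page lives outside any memslot, but the
 * guest may still fault on it, so treat its gfn as visible.
 */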
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

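/*
 * Resolve a guest page fault: translate the effective address through the
 * emulated MMU (or 1:1 in real mode), then either reflect the fault back
 * into the guest (-ENOENT/-EPERM/-EINVAL), install a shadow mapping on
 * the host, or hand the access to the MMIO emulation path.
 */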
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

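/*
 * With VSX, the FP registers share storage with the VSX array: FPR[i] is
 * doubleword 0 of VSR[i], so the u64-indexed thread array is strided by
 * two, and the matching VSX low halves sit at index*2 + 1 (see
 * kvmppc_giveup_ext/kvmppc_handle_ext below).
 */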
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

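/*
 * Fetch the faulting instruction for emulation. kvmppc_get_last_inst()
 * may return a copy captured at exit time; kvmppc_ld() re-reads it
 * through the guest MMU, and if the page has meanwhile disappeared
 * (-ENOENT) we synthesize a page-fault-flavoured instruction storage
 * interrupt instead.
 */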
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

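/*
 * Extension units are switched lazily: the guest runs with FP/VEC/VSX
 * disabled until it first touches one, we take the "unavailable"
 * interrupt here, load the guest register file into the thread, and only
 * then enable the facility for real. kvmppc_giveup_ext() is the inverse
 * operation on the way out.
 */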
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

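/*
 * Main exit dispatcher: triage the reason we left the guest. Shadow MMU
 * faults are resolved here when possible, trapped or patched instructions
 * go to the emulator, hypercalls to their respective backends, and
 * anything else is reflected back into the guest. The return value
 * decides whether we re-enter the guest or drop back to userspace.
 */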
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	trace_kvm_book3s_exit(exit_nr, vcpu);
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |=
				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
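	/*
	 * System calls from the guest come in four flavours: PAPR
	 * hypercalls ("sc 1", which encodes as 0x44000022, issued from
	 * privileged state), Mac-on-Linux/OSI hypercalls, KVM's own
	 * paravirt hypercalls, and ordinary syscalls that are simply
	 * reflected back into the guest.
	 */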
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

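/*
 * get/set_sregs transfer the emulated MMU state to and from userspace:
 * SDR1 plus either the SLB (64-bit guests) or the segment and BAT
 * registers (32-bit guests), and optionally a user-specified HIOR when
 * the KVM_SREGS_S_HIOR flag is set.
 */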
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
		sregs->u.s.hior = to_book3s(vcpu)->hior;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
		to_book3s(vcpu)->hior_sregs = true;
		to_book3s(vcpu)->hior = sregs->u.s.hior;
	}

	return 0;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

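/*
 * Note the allocation layout: the Book3S struct is vzalloc'd (it is
 * large), the shadow vcpu is kzalloc'd separately, and the shared
 * (magic) page occupies the last 4k of a zeroed page, presumably so the
 * guest-visible page stays 4k-sized even on hosts with a larger
 * PAGE_SIZE.
 */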
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
	vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.rmcall = *(ulong *)kvmppc_rmcall;
#else
	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
#endif

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shared_page;

	return vcpu;

free_shared_page:
	/* don't leak the shared page on the MMU init error path */
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

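/*
 * Run the guest on this vcpu. The guest borrows the real FPU/Altivec/VSX
 * register files, so the host thread's state is saved on the stack
 * around the run and restored afterwards; guest state is pulled back
 * into the vcpu via kvmppc_giveup_ext() before the host copies are put
 * back in place.
 */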
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvm_guest_enter();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvm_guest_exit();

	local_irq_disable();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

	return ret;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);