KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
author Scott Wood <scottwood@freescale.com>
Wed, 9 Nov 2011 00:23:30 +0000 (18:23 -0600)
committer Avi Kivity <avi@redhat.com>
Mon, 5 Mar 2012 12:52:26 +0000 (14:52 +0200)
This allows additional registers to be accessed by the guest
in PR-mode KVM without trapping.

SPRG4-7 are readable from guest userspace.  On booke, KVM will sync
these registers when it enters the guest, so that accesses from
guest userspace will work.  The guest kernel, on the other hand, must
consistently use either the real registers or the shared area between
exits.  This also applies to the already-paravirtualized SPRG3.

On non-booke, it's not clear to what extent SPRG4-7 are supported
(they're not architected for book3s, but exist on at least some classic
chips).  They are copied in the get/set regs ioctls, but I do not see any
non-booke emulation.  I also do not see any syncing with real registers
(in PR-mode) including the user-readable SPRG3.  This patch should not
make that situation any worse.
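
To illustrate the consistency rule (a sketch only; the helper names
are hypothetical and not part of this patch): once a guest kernel
writes a high SPRG through the shared area, it must also read it back
through the shared area until the next exit, because the real
register stays stale until KVM syncs it on guest entry.

    /* Sketch: "shared" is the guest's mapping of the magic page
     * (struct kvm_vcpu_arch_shared). */
    static inline void guest_set_sprg4(struct kvm_vcpu_arch_shared *shared,
                                       unsigned long val)
    {
            shared->sprg4 = val;            /* a patched mtspr lands here */
    }

    static inline unsigned long
    guest_get_sprg4(struct kvm_vcpu_arch_shared *shared)
    {
            return shared->sprg4;           /* not mfspr(SPRN_SPRG4) */
    }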

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
15 files changed:
arch/powerpc/include/asm/kvm_e500.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_para.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/kvm.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/booke_interrupts.S
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/e500_emulate.c
arch/powerpc/kvm/e500_tlb.c
arch/powerpc/kvm/e500_tlb.h
arch/powerpc/kvm/emulate.c
arch/powerpc/kvm/powerpc.c

arch/powerpc/include/asm/kvm_e500.h
index bc17441535f208a8a4ebba9e054604938b1adf3e..8cd50a514271b68a5b1f83abd0a891cbeb80b18f 100644
@@ -71,14 +71,6 @@ struct kvmppc_vcpu_e500 {
        u32 pid[E500_PID_NUM];
        u32 svr;
 
-       u32 mas0;
-       u32 mas1;
-       u32 mas2;
-       u64 mas7_3;
-       u32 mas4;
-       u32 mas5;
-       u32 mas6;
-
        /* vcpu id table */
        struct vcpu_id_table *idt;
 
arch/powerpc/include/asm/kvm_host.h
index bf8af5d5d5dc6e6ad0a8799f69394b17c4e44300..bfd0c9912da53e85da17549b4c701388572f46b4 100644
@@ -318,10 +318,6 @@ struct kvm_vcpu_arch {
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
        ulong shadow_msr;
-       ulong sprg4;
-       ulong sprg5;
-       ulong sprg6;
-       ulong sprg7;
        ulong csrr0;
        ulong csrr1;
        ulong dsrr0;
@@ -329,7 +325,6 @@ struct kvm_vcpu_arch {
        ulong mcsrr0;
        ulong mcsrr1;
        ulong mcsr;
-       ulong esr;
        u32 dec;
        u32 decar;
        u32 tbl;
@@ -338,7 +333,6 @@ struct kvm_vcpu_arch {
        u32 tsr;
        u32 ivor[64];
        ulong ivpr;
-       u32 pir;
        u32 pvr;
 
        u32 shadow_pid;
arch/powerpc/include/asm/kvm_para.h
index 50533f9adf40548671c6b28473d783d950b40156..ece70fb36513caf3c18249dfb6001eee7248600f 100644
@@ -33,11 +33,35 @@ struct kvm_vcpu_arch_shared {
        __u64 sprg3;
        __u64 srr0;
        __u64 srr1;
-       __u64 dar;
+       __u64 dar;              /* dear on BookE */
        __u64 msr;
        __u32 dsisr;
        __u32 int_pending;      /* Tells the guest if we have an interrupt */
        __u32 sr[16];
+       __u32 mas0;
+       __u32 mas1;
+       __u64 mas7_3;
+       __u64 mas2;
+       __u32 mas4;
+       __u32 mas6;
+       __u32 esr;
+       __u32 pir;
+
+       /*
+        * SPRG4-7 are user-readable, so we can only keep these consistent
+        * between the shared area and the real registers when there's an
+        * intervening exit to KVM.  This also applies to SPRG3 on some
+        * chips.
+        *
+        * This suffices for access by guest userspace, since in PR-mode
+        * KVM, an exit must occur when changing the guest's MSR[PR].
+        * If the guest kernel writes to SPRG3-7 via the shared area, it
+        * must also use the shared area for reading while in kernel space.
+        */
+       __u64 sprg4;
+       __u64 sprg5;
+       __u64 sprg6;
+       __u64 sprg7;
 };
 
 #define KVM_SC_MAGIC_R0                0x4b564d21 /* "KVM!" */
@@ -47,7 +71,10 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_FEATURE_MAGIC_PAGE 1
 
-#define KVM_MAGIC_FEAT_SR      (1 << 0)
+#define KVM_MAGIC_FEAT_SR              (1 << 0)
+
+/* MASn, ESR, PIR, and high SPRGs */
+#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7   (1 << 1)
 
 #ifdef __KERNEL__
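
As a minimal sketch of the guest side (assuming "features" holds the
bits the host returned from the magic-page hypercall; the function
name is hypothetical), the new shared-area fields are only valid once
the host advertises the feature bit:

    /* Sketch: gate use of the MASn/ESR/PIR/SPRG4-7 fields of the
     * shared area on the feature bit the host advertises. */
    static bool can_use_shared_high_regs(u32 features)
    {
            return (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) != 0;
    }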
 
arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc185a223841b4827da5576359f5be1..e7bfcf81b7461284a0c1d50ea7ae0579bfe75711 100644
@@ -426,16 +426,23 @@ int main(void)
        DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
        DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
 #endif
-       DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
-       DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
-       DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
-       DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+       DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
+       DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
+       DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
+       DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
        DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
        DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
        DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
        DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
+       DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
+       DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
+       DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
+       DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
+       DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
+       DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+
        /* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
        DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
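
For context: asm-offsets.c is never linked into the kernel.  Each
DEFINE() emits an assembler marker that kbuild turns into a #define
in the generated asm-offsets.h, which the assembly code below uses to
index into structs.  Roughly (a sketch; the numeric offsets are
illustrative, not the real values):

    /* sketch of the generated asm-offsets.h */
    #define VCPU_SHARED_MAS0 48   /* offsetof(struct kvm_vcpu_arch_shared, mas0) */
    #define VCPU_SHARED_SPRG4 96  /* offsetof(struct kvm_vcpu_arch_shared, sprg4) */
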
arch/powerpc/kernel/kvm.c
index 06b15ee997f7409b9fabf1b85fa028c4f5b3574d..04d4b5aa6dcae5eaac326972d9a028671f086b6e 100644
 #define KVM_RT_30              0x03c00000
 #define KVM_MASK_RB            0x0000f800
 #define KVM_INST_MFMSR         0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0   0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1   0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2   0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3   0x7c1342a6
-#define KVM_INST_MFSPR_SRR0    0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1    0x7c1b02a6
-#define KVM_INST_MFSPR_DAR     0x7c1302a6
-#define KVM_INST_MFSPR_DSISR   0x7c1202a6
-
-#define KVM_INST_MTSPR_SPRG0   0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1   0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2   0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3   0x7c1343a6
-#define KVM_INST_MTSPR_SRR0    0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1    0x7c1b03a6
-#define KVM_INST_MTSPR_DAR     0x7c1303a6
-#define KVM_INST_MTSPR_DSISR   0x7c1203a6
+
+#define SPR_FROM               0
+#define SPR_TO                 0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+                                   (((sprn) & 0x1f) << 16) | \
+                                   (((sprn) & 0x3e0) << 6) | \
+                                   (moveto))
+
+#define KVM_INST_MFSPR(sprn)   KVM_INST_SPR(sprn, SPR_FROM)
+#define KVM_INST_MTSPR(sprn)   KVM_INST_SPR(sprn, SPR_TO)
 
 #define KVM_INST_TLBSYNC       0x7c00046c
 #define KVM_INST_MTMSRD_L0     0x7c000164
@@ -440,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
-       case KVM_INST_MFSPR_SPRG0:
+       case KVM_INST_MFSPR(SPRN_SPRG0):
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
-       case KVM_INST_MFSPR_SPRG1:
+       case KVM_INST_MFSPR(SPRN_SPRG1):
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
-       case KVM_INST_MFSPR_SPRG2:
+       case KVM_INST_MFSPR(SPRN_SPRG2):
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
-       case KVM_INST_MFSPR_SPRG3:
+       case KVM_INST_MFSPR(SPRN_SPRG3):
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
-       case KVM_INST_MFSPR_SRR0:
+       case KVM_INST_MFSPR(SPRN_SRR0):
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
-       case KVM_INST_MFSPR_SRR1:
+       case KVM_INST_MFSPR(SPRN_SRR1):
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
-       case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_DEAR):
+#else
+       case KVM_INST_MFSPR(SPRN_DAR):
+#endif
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
-       case KVM_INST_MFSPR_DSISR:
+       case KVM_INST_MFSPR(SPRN_DSISR):
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;
 
+#ifdef CONFIG_PPC_BOOK3E_MMU
+       case KVM_INST_MFSPR(SPRN_MAS0):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS1):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS2):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS3):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS4):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS6):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_MAS7):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+               break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+       case KVM_INST_MFSPR(SPRN_SPRG4):
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_SPRG4R):
+#endif
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_SPRG5):
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_SPRG5R):
+#endif
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_SPRG6):
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_SPRG6R):
+#endif
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+               break;
+       case KVM_INST_MFSPR(SPRN_SPRG7):
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_SPRG7R):
+#endif
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+               break;
+
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MFSPR(SPRN_ESR):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+               break;
+#endif
+
+       case KVM_INST_MFSPR(SPRN_PIR):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+               break;
+
+
        /* Stores */
-       case KVM_INST_MTSPR_SPRG0:
+       case KVM_INST_MTSPR(SPRN_SPRG0):
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
-       case KVM_INST_MTSPR_SPRG1:
+       case KVM_INST_MTSPR(SPRN_SPRG1):
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
-       case KVM_INST_MTSPR_SPRG2:
+       case KVM_INST_MTSPR(SPRN_SPRG2):
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
-       case KVM_INST_MTSPR_SPRG3:
+       case KVM_INST_MTSPR(SPRN_SPRG3):
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
-       case KVM_INST_MTSPR_SRR0:
+       case KVM_INST_MTSPR(SPRN_SRR0):
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
-       case KVM_INST_MTSPR_SRR1:
+       case KVM_INST_MTSPR(SPRN_SRR1):
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
-       case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MTSPR(SPRN_DEAR):
+#else
+       case KVM_INST_MTSPR(SPRN_DAR):
+#endif
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
-       case KVM_INST_MTSPR_DSISR:
+       case KVM_INST_MTSPR(SPRN_DSISR):
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+       case KVM_INST_MTSPR(SPRN_MAS0):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS1):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS2):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS3):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS4):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS6):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_MAS7):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+               break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+       case KVM_INST_MTSPR(SPRN_SPRG4):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_SPRG5):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_SPRG6):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+               break;
+       case KVM_INST_MTSPR(SPRN_SPRG7):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+               break;
+
+#ifdef CONFIG_BOOKE
+       case KVM_INST_MTSPR(SPRN_ESR):
+               if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+                       kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+               break;
+#endif
 
        /* Nops */
        case KVM_INST_TLBSYNC:
@@ -556,9 +685,18 @@ static void kvm_use_magic_page(void)
        start = (void*)_stext;
        end = (void*)_etext;
 
+       /*
+        * Being interrupted in the middle of patching would
+        * be bad for SPRG4-7, which KVM can't keep in sync
+        * with emulated accesses because reads don't trap.
+        */
+       local_irq_disable();
+
        for (p = start; p < end; p++)
                kvm_check_ins(p, features);
 
+       local_irq_enable();
+
        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
                         kvm_patching_worked ? "worked" : "failed");
 }
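
The KVM_INST_SPR() macro deserves a note: mfspr/mtspr encode the SPR
number with its two 5-bit halves swapped in the instruction word,
which is what the &0x1f and &0x3e0 shifts implement, and adding 0x100
to the extended opcode selects mtspr (467) over mfspr (339).  A
self-contained sketch checking the macro against two of the hardcoded
constants this patch deletes (SPRN_SPRG0 is 0x110):

    #include <assert.h>

    #define SPR_FROM                0
    #define SPR_TO                  0x100

    #define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
                                        (((sprn) & 0x1f) << 16) | \
                                        (((sprn) & 0x3e0) << 6) | \
                                        (moveto))

    #define KVM_INST_MFSPR(sprn)    KVM_INST_SPR(sprn, SPR_FROM)
    #define KVM_INST_MTSPR(sprn)    KVM_INST_SPR(sprn, SPR_TO)

    int main(void)
    {
            /* SPRN_SPRG0 = 0x110; expected values come from the old
             * KVM_INST_MFSPR_SPRG0/MTSPR_SPRG0 table removed above. */
            assert(KVM_INST_MFSPR(0x110) == 0x7c1042a6);
            assert(KVM_INST_MTSPR(0x110) == 0x7c1043a6);
            return 0;
    }
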
arch/powerpc/kvm/book3s.c
index 73fc9f0461077b83e003b0be43ccf0e381cece5a..5398744cd773d82ccd0bf33f8b3adec4aeec0e56 100644
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
-       regs->sprg4 = vcpu->arch.sprg4;
-       regs->sprg5 = vcpu->arch.sprg5;
-       regs->sprg6 = vcpu->arch.sprg6;
-       regs->sprg7 = vcpu->arch.sprg7;
+       regs->sprg4 = vcpu->arch.shared->sprg4;
+       regs->sprg5 = vcpu->arch.shared->sprg5;
+       regs->sprg6 = vcpu->arch.shared->sprg6;
+       regs->sprg7 = vcpu->arch.shared->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
-       vcpu->arch.sprg4 = regs->sprg4;
-       vcpu->arch.sprg5 = regs->sprg5;
-       vcpu->arch.sprg6 = regs->sprg6;
-       vcpu->arch.sprg7 = regs->sprg7;
+       vcpu->arch.shared->sprg4 = regs->sprg4;
+       vcpu->arch.shared->sprg5 = regs->sprg5;
+       vcpu->arch.shared->sprg6 = regs->sprg6;
+       vcpu->arch.shared->sprg7 = regs->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
arch/powerpc/kvm/booke.c
index 8dfc59a8a715508459719b5f43f7249c164661a6..50803dd0b8f2a84702cd3fc40cb6772e8330590a 100644
@@ -270,7 +270,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
-                       vcpu->arch.esr = vcpu->arch.queued_esr;
+                       vcpu->arch.shared->esr = vcpu->arch.queued_esr;
                if (update_dear == true)
                        vcpu->arch.shared->dar = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -644,6 +644,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        vcpu->arch.pc = 0;
        vcpu->arch.shared->msr = 0;
        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+       vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
        vcpu->arch.shadow_pid = 1;
@@ -678,10 +679,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
-       regs->sprg4 = vcpu->arch.sprg4;
-       regs->sprg5 = vcpu->arch.sprg5;
-       regs->sprg6 = vcpu->arch.sprg6;
-       regs->sprg7 = vcpu->arch.sprg7;
+       regs->sprg4 = vcpu->arch.shared->sprg4;
+       regs->sprg5 = vcpu->arch.shared->sprg5;
+       regs->sprg6 = vcpu->arch.shared->sprg6;
+       regs->sprg7 = vcpu->arch.shared->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -706,10 +707,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
-       vcpu->arch.sprg4 = regs->sprg4;
-       vcpu->arch.sprg5 = regs->sprg5;
-       vcpu->arch.sprg6 = regs->sprg6;
-       vcpu->arch.sprg7 = regs->sprg7;
+       vcpu->arch.shared->sprg4 = regs->sprg4;
+       vcpu->arch.shared->sprg5 = regs->sprg5;
+       vcpu->arch.shared->sprg6 = regs->sprg6;
+       vcpu->arch.shared->sprg7 = regs->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -727,7 +728,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
-       sregs->u.e.esr = vcpu->arch.esr;
+       sregs->u.e.esr = vcpu->arch.shared->esr;
        sregs->u.e.dear = vcpu->arch.shared->dar;
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
@@ -745,7 +746,7 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
-       vcpu->arch.esr = sregs->u.e.esr;
+       vcpu->arch.shared->esr = sregs->u.e.esr;
        vcpu->arch.shared->dar = sregs->u.e.dear;
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        vcpu->arch.tcr = sregs->u.e.tcr;
arch/powerpc/kvm/booke_emulate.c
index 1260f5f24c0c3df9827a4d08131ad589ee83d170..bae9288ac1e1806eb3366bc750ee385bbb4de108 100644
@@ -107,7 +107,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
        case SPRN_DEAR:
                vcpu->arch.shared->dar = spr_val; break;
        case SPRN_ESR:
-               vcpu->arch.esr = spr_val; break;
+               vcpu->arch.shared->esr = spr_val; break;
        case SPRN_DBCR0:
                vcpu->arch.dbcr0 = spr_val; break;
        case SPRN_DBCR1:
@@ -125,13 +125,13 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
         * loaded into the real SPRGs when resuming the
         * guest. */
        case SPRN_SPRG4:
-               vcpu->arch.sprg4 = spr_val; break;
+               vcpu->arch.shared->sprg4 = spr_val; break;
        case SPRN_SPRG5:
-               vcpu->arch.sprg5 = spr_val; break;
+               vcpu->arch.shared->sprg5 = spr_val; break;
        case SPRN_SPRG6:
-               vcpu->arch.sprg6 = spr_val; break;
+               vcpu->arch.shared->sprg6 = spr_val; break;
        case SPRN_SPRG7:
-               vcpu->arch.sprg7 = spr_val; break;
+               vcpu->arch.shared->sprg7 = spr_val; break;
 
        case SPRN_IVPR:
                vcpu->arch.ivpr = spr_val;
@@ -202,7 +202,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_DEAR:
                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
        case SPRN_ESR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
        case SPRN_DBCR0:
                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
        case SPRN_DBCR1:
arch/powerpc/kvm/booke_interrupts.S
index 42f2fb1f66e9dc1bad51b44334dca63fc29d8365..10d8ef602e5c9303795c7b78268d5691be79eeec 100644
@@ -402,19 +402,25 @@ lightweight_exit:
        /* Save vcpu pointer for the exception handlers. */
        mtspr   SPRN_SPRG_WVCPU, r4
 
+       lwz     r5, VCPU_SHARED(r4)
+
        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz     r1, VCPU_GPR(r1)(r4)
 
-       /* Host interrupt handlers may have clobbered these guest-readable
-        * SPRGs, so we need to reload them here with the guest's values. */
-       lwz     r3, VCPU_SPRG4(r4)
+       /*
+        * Host interrupt handlers may have clobbered these
+        * guest-readable SPRGs, or the guest kernel may have
+        * written directly to the shared area, so we
+        * need to reload them here with the guest's values.
+        */
+       lwz     r3, VCPU_SHARED_SPRG4(r5)
        mtspr   SPRN_SPRG4W, r3
-       lwz     r3, VCPU_SPRG5(r4)
+       lwz     r3, VCPU_SHARED_SPRG5(r5)
        mtspr   SPRN_SPRG5W, r3
-       lwz     r3, VCPU_SPRG6(r4)
+       lwz     r3, VCPU_SHARED_SPRG6(r5)
        mtspr   SPRN_SPRG6W, r3
-       lwz     r3, VCPU_SPRG7(r4)
+       lwz     r3, VCPU_SHARED_SPRG7(r5)
        mtspr   SPRN_SPRG7W, r3
 
 #ifdef CONFIG_KVM_EXIT_TIMING
arch/powerpc/kvm/e500.c
index ac3c4bf21677e3de73c5650fed861216475b204a..709d82f956e3d9ab0db57f8cb5fdd722ff339530 100644
@@ -115,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
 
-       sregs->u.e.mas0 = vcpu_e500->mas0;
-       sregs->u.e.mas1 = vcpu_e500->mas1;
-       sregs->u.e.mas2 = vcpu_e500->mas2;
-       sregs->u.e.mas7_3 = vcpu_e500->mas7_3;
-       sregs->u.e.mas4 = vcpu_e500->mas4;
-       sregs->u.e.mas6 = vcpu_e500->mas6;
+       sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+       sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+       sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+       sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+       sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+       sregs->u.e.mas6 = vcpu->arch.shared->mas6;
 
        sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
        sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -148,12 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        }
 
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
-               vcpu_e500->mas0 = sregs->u.e.mas0;
-               vcpu_e500->mas1 = sregs->u.e.mas1;
-               vcpu_e500->mas2 = sregs->u.e.mas2;
-               vcpu_e500->mas7_3 = sregs->u.e.mas7_3;
-               vcpu_e500->mas4 = sregs->u.e.mas4;
-               vcpu_e500->mas6 = sregs->u.e.mas6;
+               vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+               vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+               vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+               vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+               vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+               vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }
 
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
arch/powerpc/kvm/e500_emulate.c
index e0d36099c7567f55eaa129722fdf71a3ebdc5adc..6d0b2bd54fb0a9ffef74f3bfb21b04fa34aa4ce7 100644
@@ -89,22 +89,22 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
                        return EMULATE_FAIL;
                vcpu_e500->pid[2] = spr_val; break;
        case SPRN_MAS0:
-               vcpu_e500->mas0 = spr_val; break;
+               vcpu->arch.shared->mas0 = spr_val; break;
        case SPRN_MAS1:
-               vcpu_e500->mas1 = spr_val; break;
+               vcpu->arch.shared->mas1 = spr_val; break;
        case SPRN_MAS2:
-               vcpu_e500->mas2 = spr_val; break;
+               vcpu->arch.shared->mas2 = spr_val; break;
        case SPRN_MAS3:
-               vcpu_e500->mas7_3 &= ~(u64)0xffffffff;
-               vcpu_e500->mas7_3 |= spr_val;
+               vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
+               vcpu->arch.shared->mas7_3 |= spr_val;
                break;
        case SPRN_MAS4:
-               vcpu_e500->mas4 = spr_val; break;
+               vcpu->arch.shared->mas4 = spr_val; break;
        case SPRN_MAS6:
-               vcpu_e500->mas6 = spr_val; break;
+               vcpu->arch.shared->mas6 = spr_val; break;
        case SPRN_MAS7:
-               vcpu_e500->mas7_3 &= (u64)0xffffffff;
-               vcpu_e500->mas7_3 |= (u64)spr_val << 32;
+               vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
+               vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
                break;
        case SPRN_L1CSR0:
                vcpu_e500->l1csr0 = spr_val;
@@ -147,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int emulated = EMULATE_DONE;
+       unsigned long val;
 
        switch (sprn) {
        case SPRN_PID:
@@ -156,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_PID2:
                kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
        case SPRN_MAS0:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
        case SPRN_MAS1:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
        case SPRN_MAS2:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
        case SPRN_MAS3:
-               kvmppc_set_gpr(vcpu, rt, (u32)vcpu_e500->mas7_3); break;
+               val = (u32)vcpu->arch.shared->mas7_3;
+               kvmppc_set_gpr(vcpu, rt, val);
+               break;
        case SPRN_MAS4:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
        case SPRN_MAS6:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
+               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
        case SPRN_MAS7:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7_3 >> 32); break;
-
+               val = vcpu->arch.shared->mas7_3 >> 32;
+               kvmppc_set_gpr(vcpu, rt, val);
+               break;
        case SPRN_TLB0CFG:
                kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
        case SPRN_TLB1CFG:
arch/powerpc/kvm/e500_tlb.c
index 6fefb9144f23d73d6049ff8292e0cd14d7711ea9..9cd124a11acdbd83e68f0ed902cedc64e740bd33 100644
@@ -428,13 +428,14 @@ static int htlb0_set_base(gva_t addr)
                             host_tlb_params[0].ways);
 }
 
-static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
+static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
-       unsigned int esel = get_tlb_esel_bit(vcpu_e500);
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       int esel = get_tlb_esel_bit(vcpu);
 
        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
-               esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
+               esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }
@@ -545,20 +546,20 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
        int tlbsel;
 
        /* since we only have two TLBs, only lower bit is used. */
-       tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
+       tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
-       pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
-       tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
+       pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+       tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
 
-       vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+       vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-       vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
+       vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(vcpu_e500->pid[pidsel])
                | MAS1_TSIZE(tsized);
-       vcpu_e500->mas2 = (eaddr & MAS2_EPN)
-               | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
-       vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
-       vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
+       vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
+               | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
+       vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+       vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
 }
@@ -844,15 +845,15 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;
 
-       tlbsel = get_tlb_tlbsel(vcpu_e500);
-       esel = get_tlb_esel(vcpu_e500, tlbsel);
+       tlbsel = get_tlb_tlbsel(vcpu);
+       esel = get_tlb_esel(vcpu, tlbsel);
 
        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-       vcpu_e500->mas0 &= ~MAS0_NV(~0);
-       vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-       vcpu_e500->mas1 = gtlbe->mas1;
-       vcpu_e500->mas2 = gtlbe->mas2;
-       vcpu_e500->mas7_3 = gtlbe->mas7_3;
+       vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
+       vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
+       vcpu->arch.shared->mas1 = gtlbe->mas1;
+       vcpu->arch.shared->mas2 = gtlbe->mas2;
+       vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
 
        return EMULATE_DONE;
 }
@@ -860,8 +861,8 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       int as = !!get_cur_sas(vcpu_e500);
-       unsigned int pid = get_cur_spid(vcpu_e500);
+       int as = !!get_cur_sas(vcpu);
+       unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
        gva_t ea;
@@ -879,26 +880,30 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
 
-               vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
+               vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-               vcpu_e500->mas1 = gtlbe->mas1;
-               vcpu_e500->mas2 = gtlbe->mas2;
-               vcpu_e500->mas7_3 = gtlbe->mas7_3;
+               vcpu->arch.shared->mas1 = gtlbe->mas1;
+               vcpu->arch.shared->mas2 = gtlbe->mas2;
+               vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;
 
                /* since we only have two TLBs, only lower bit is used. */
-               tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
+               tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
 
-               vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+               vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
+                       | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-               vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
-                       | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
-                       | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
-               vcpu_e500->mas2 &= MAS2_EPN;
-               vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
-               vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+               vcpu->arch.shared->mas1 =
+                         (vcpu->arch.shared->mas6 & MAS6_SPID0)
+                       | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+                       | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
+               vcpu->arch.shared->mas2 &= MAS2_EPN;
+               vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
+                                          MAS2_ATTRIB_MASK;
+               vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
+                                            MAS3_U2 | MAS3_U3;
        }
 
        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
@@ -929,19 +934,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
 
-       tlbsel = get_tlb_tlbsel(vcpu_e500);
-       esel = get_tlb_esel(vcpu_e500, tlbsel);
+       tlbsel = get_tlb_tlbsel(vcpu);
+       esel = get_tlb_esel(vcpu, tlbsel);
 
        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
        if (get_tlb_v(gtlbe))
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
 
-       gtlbe->mas1 = vcpu_e500->mas1;
-       gtlbe->mas2 = vcpu_e500->mas2;
-       gtlbe->mas7_3 = vcpu_e500->mas7_3;
+       gtlbe->mas1 = vcpu->arch.shared->mas1;
+       gtlbe->mas2 = vcpu->arch.shared->mas2;
+       gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-       trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
+       trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
                             (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
 
        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
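
In kvmppc_e500_deliver_tlb_miss() above, the shifts decode the MAS4
default fields: the TLB-select default from bit position 28, the PID
selector from positions 16-19, and the default TSIZE from positions
7-11.  A hedged sketch of that decoding (helper names hypothetical):

    /* Sketch: pull the default fields out of MAS4, matching the
     * shifts in the miss handler above. */
    static inline unsigned int mas4_tlbseld(u32 mas4)
    {
            return (mas4 >> 28) & 0x1;      /* two TLBs, one bit used */
    }

    static inline unsigned int mas4_tidseld(u32 mas4)
    {
            return (mas4 >> 16) & 0xf;
    }

    static inline unsigned int mas4_tsized(u32 mas4)
    {
            return (mas4 >> 7) & 0x1f;
    }
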
arch/powerpc/kvm/e500_tlb.h
index 2c296407e75917ae024ffc21a715073a48803522..5c6d2d7bf058f8efd657c6f8dea558ba3500f44b 100644
@@ -121,38 +121,33 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
        return !!(vcpu->arch.shared->msr & MSR_PR);
 }
 
-static inline unsigned int get_cur_spid(
-               const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
 {
-       return (vcpu_e500->mas6 >> 16) & 0xff;
+       return (vcpu->arch.shared->mas6 >> 16) & 0xff;
 }
 
-static inline unsigned int get_cur_sas(
-               const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
 {
-       return vcpu_e500->mas6 & 0x1;
+       return vcpu->arch.shared->mas6 & 0x1;
 }
 
-static inline unsigned int get_tlb_tlbsel(
-               const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
 {
        /*
         * Manual says that tlbsel has 2 bits wide.
         * Since we only have two TLBs, only lower bit is used.
         */
-       return (vcpu_e500->mas0 >> 28) & 0x1;
+       return (vcpu->arch.shared->mas0 >> 28) & 0x1;
 }
 
-static inline unsigned int get_tlb_nv_bit(
-               const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
 {
-       return vcpu_e500->mas0 & 0xfff;
+       return vcpu->arch.shared->mas0 & 0xfff;
 }
 
-static inline unsigned int get_tlb_esel_bit(
-               const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
 {
-       return (vcpu_e500->mas0 >> 16) & 0xfff;
+       return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
 }
 
 static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
arch/powerpc/kvm/emulate.c
index b6df56dd93ba279afb1eb8e4661729e8b78f2382..bda052e2264b5592b2e4fb25d4858fc102ee25d5 100644
@@ -162,7 +162,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-               kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+               kvmppc_core_queue_program(vcpu,
+                                         vcpu->arch.shared->esr | ESR_PTR);
 #endif
                advance = 0;
                break;
arch/powerpc/kvm/powerpc.c
index 7411bdd8ff6f0d79778c3f2db091c9b9ec64d9b6..d02e4c84e2130708df8cad62354654e727575f97 100644
@@ -66,7 +66,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;
 
-               r2 = KVM_MAGIC_FEAT_SR;
+               r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 
                r = HC_EV_SUCCESS;
                break;