powerpc: kvm: optimize "sc 1" as fast return
author Liu Ping Fan <kernelfans@gmail.com>
Tue, 19 Nov 2013 06:12:48 +0000 (14:12 +0800)
committer Alexander Graf <agraf@suse.de>
Thu, 21 Nov 2013 13:56:45 +0000 (14:56 +0100)
In some scenarios, e.g. OpenStack CI, a PR guest can trigger "sc 1" frequently.
This patch optimizes that path by directly delivering BOOK3S_INTERRUPT_SYSCALL
to the HV guest, so powernv can return to the HV guest without a heavy exit,
i.e. without having to swap the TLB, HTAB, etc.
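
For orientation, here is a minimal C-level sketch (not part of the patch) of
what delivering BOOK3S_INTERRUPT_SYSCALL directly back to the guest amounts
to. The struct and function names are invented for illustration; the 0xc00
vector and the MSR_SF | MSR_ME resume value mirror what sc_1_fast_return in
the assembly below sets up.

#include <stdio.h>
#include <stdint.h>

#define MSR_SF (1ULL << 63)              /* 64-bit mode */
#define MSR_PR (1ULL << 14)              /* problem state (userspace) */
#define MSR_ME (1ULL << 12)              /* machine check enable */
#define BOOK3S_INTERRUPT_SYSCALL 0xc00   /* guest system-call vector */

/* Hypothetical snapshot of the guest state touched by the fast return. */
struct guest_state {
	uint64_t pc;    /* where the guest resumes */
	uint64_t msr;   /* MSR the guest resumes with */
	uint64_t srr0;  /* guest SRR0 */
	uint64_t srr1;  /* guest SRR1 */
};

/*
 * Reflect an "sc 1" issued with MSR_PR set back into the guest as an
 * ordinary system call: save the interrupted context in the guest's
 * SRR0/SRR1 and re-enter the guest at its 0xc00 handler, instead of
 * doing a full exit to the host.
 */
static void reflect_sc1(struct guest_state *g, uint64_t trap_pc,
			uint64_t trap_msr)
{
	g->srr0 = trap_pc;                  /* instruction after the sc 1 */
	g->srr1 = trap_msr;                 /* guest MSR at the trap */
	g->pc   = BOOK3S_INTERRUPT_SYSCALL; /* guest's syscall vector */
	g->msr  = MSR_SF | MSR_ME;          /* same value the asm synthesizes */
}

int main(void)
{
	struct guest_state g = { 0 };

	reflect_sc1(&g, 0x10001234, MSR_SF | MSR_ME | MSR_PR);
	printf("resume pc=0x%llx msr=0x%016llx srr0=0x%llx srr1=0x%016llx\n",
	       (unsigned long long)g.pc, (unsigned long long)g.msr,
	       (unsigned long long)g.srr0, (unsigned long long)g.srr1);
	return 0;
}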

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S

arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc7347b6ff4f959843c97053fa4aa1..93203bbe571441bbf05c312470ec728fa969cc70 100644
@@ -669,12 +669,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                /* hcall - punt to userspace */
                int i;
 
-               if (vcpu->arch.shregs.msr & MSR_PR) {
-                       /* sc 1 from userspace - reflect to guest syscall */
-                       kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
-                       r = RESUME_GUEST;
-                       break;
-               }
+               /* A hypercall with MSR_PR set has already been handled
+                * in real mode and never reaches here.
+                */
+
                run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
                for (i = 0; i < 9; ++i)
                        run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925cd34ad1d11379fee8c09f1d275e5..d5ddc2d10748443d4237f03de10987bae6bff070 100644
@@ -686,6 +686,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 5:     mtspr   SPRN_SRR0, r6
        mtspr   SPRN_SRR1, r7
 
+/*
+ * Required state:
+ * R4  = vcpu
+ * R10 = value for HSRR0
+ * R11 = value for HSRR1
+ * R13 = PACA
+ */
 fast_guest_return:
        li      r0,0
        stb     r0,VCPU_CEDED(r4)       /* cancel cede */
@@ -1471,7 +1478,8 @@ kvmppc_hisi:
 hcall_try_real_mode:
        ld      r3,VCPU_GPR(R3)(r9)
        andi.   r0,r11,MSR_PR
-       bne     guest_exit_cont
+       /* sc 1 from userspace - reflect to guest syscall */
+       bne     sc_1_fast_return
        clrrdi  r3,r3,2
        cmpldi  r3,hcall_real_table_end - hcall_real_table
        bge     guest_exit_cont
@@ -1492,6 +1500,15 @@ hcall_try_real_mode:
        ld      r11,VCPU_MSR(r4)
        b       fast_guest_return
 
+sc_1_fast_return:
+       mtspr   SPRN_SRR0,r10
+       mtspr   SPRN_SRR1,r11
+       li      r10, BOOK3S_INTERRUPT_SYSCALL
+       li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
+       rotldi  r11, r11, 63
+       mr      r4,r9
+       b       fast_guest_return
+
        /* We've attempted a real mode hcall, but it's punted it back
         * to userspace.  We need to restore some clobbered volatiles
         * before resuming the pass-it-to-qemu path */
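
As a side note, the MSR value that sc_1_fast_return builds with the li/rotldi
pair above can be checked with a small user-space program. This is only a
sketch; the MSR_SF and MSR_ME bit positions are assumed from the usual powerpc
definitions rather than taken from this patch.

#include <stdio.h>
#include <stdint.h>

#define MSR_SF (1ULL << 63)   /* 64-bit mode */
#define MSR_ME (1ULL << 12)   /* machine check enable */

/* Rotate a 64-bit value left by n bits, like the rotldi instruction. */
static uint64_t rotldi(uint64_t x, unsigned int n)
{
	n &= 63;
	return n ? (x << n) | (x >> (64 - n)) : x;
}

int main(void)
{
	/* li r11, (MSR_ME << 1) | 1 ; rotldi r11, r11, 63 */
	uint64_t r11 = rotldi((MSR_ME << 1) | 1, 63);

	printf("synthesized MSR = 0x%016llx\n", (unsigned long long)r11);
	printf("equals MSR_SF | MSR_ME: %s\n",
	       r11 == (MSR_SF | MSR_ME) ? "yes" : "no");
	return 0;
}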