KVM: s390: rewrite vcpu_post_run and drop out early
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c6b4063fce295b648966e791d6114a610d9d407a..5c36c8e7e937a73ec7349b63435fb0cf1966644e 100644
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
-               if (MACHINE_HAS_VX) {
+               mutex_lock(&kvm->lock);
+               if (atomic_read(&kvm->online_vcpus)) {
+                       r = -EBUSY;
+               } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac->mask, 129);
                        set_kvm_facility(kvm->arch.model.fac->list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
+               mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
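
The new kvm->lock section also closes a race: the facility bits are presumably latched into each vcpu when it is created, so enabling the capability after vcpus exist would leave existing vcpus without vector support, hence the -EBUSY. A minimal sketch of the consumer side, assuming the usual test_kvm_facility() helper and an illustrative ECA bit (neither appears in this diff):

	/* vcpu setup consumes the facility bits exactly once;
	 * the ECA bit value below is an assumption, not from this diff */
	static void sketch_vcpu_setup_vx(struct kvm_vcpu *vcpu)
	{
		if (test_kvm_facility(vcpu->kvm, 129))	/* vector facility */
			vcpu->arch.sie_block->eca |= 0x00020000;
	}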
@@ -514,35 +518,20 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
        if (gtod_high != 0)
                return -EINVAL;
-       VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
+       VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
 
        return 0;
 }
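
This hunk and the three VM_EVENT changes below drop the trailing "\n": VM_EVENT records go to the s390 debug facility (s390dbf), whose sprintf view renders one record per line, so embedded newlines only produced blank lines in the log. Roughly (macro body from memory, not part of this diff):

	#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)	\
		debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel,	\
				    d_string, d_args)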
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       struct kvm_vcpu *cur_vcpu;
-       unsigned int vcpu_idx;
-       u64 host_tod, gtod;
-       int r;
+       u64 gtod;
 
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
-
-       mutex_lock(&kvm->lock);
-       preempt_disable();
-       kvm->arch.epoch = gtod - host_tod;
-       kvm_s390_vcpu_block_all(kvm);
-       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
-               cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-       kvm_s390_vcpu_unblock_all(kvm);
-       preempt_enable();
-       mutex_unlock(&kvm->lock);
-       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
+       kvm_s390_set_tod_clock(kvm, gtod);
+       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
        return 0;
 }
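
Setting the guest TOD now funnels through one helper. The model is guest_tod = get_tod_clock() + kvm->arch.epoch, so programming a target value reduces to a single subtraction, done centrally in kvm_s390_set_tod_clock() (added later in this diff). The matching declaration presumably lands in arch/s390/kvm/kvm-s390.h:

	void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);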
 
@@ -574,26 +563,19 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                                         sizeof(gtod_high)))
                return -EFAULT;
-       VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
+       VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
 
        return 0;
 }
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       u64 host_tod, gtod;
-       int r;
+       u64 gtod;
 
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
-
-       preempt_disable();
-       gtod = host_tod + kvm->arch.epoch;
-       preempt_enable();
+       gtod = kvm_s390_get_tod_clock_fast(kvm);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
-       VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
+       VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
 
        return 0;
 }
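
The read side is symmetric. kvm_s390_get_tod_clock_fast() is not shown in this diff, but given the open-coded version it replaces, it is plausibly an inline in kvm-s390.h along these lines:

	static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
	{
		u64 rc;

		preempt_disable();
		rc = get_tod_clock() + kvm->arch.epoch;
		preempt_enable();
		return rc;
	}

Disabling preemption keeps the TOD read and the epoch addition on one CPU, matching the removed code; using get_tod_clock() instead of store_tod_clock() also removes the error path, since the former does not report a failure.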
@@ -1120,7 +1102,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
-       sca_offset = (sca_offset + 16) & 0x7f0;
+       sca_offset += 16;
+       if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+               sca_offset = 0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);
 
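The replaced mask arithmetic cycled sca_offset through 0x000..0x7f0 in 16-byte steps, which can push the SCA across a page boundary; the new wrap keeps the whole block inside one 4K page. Worked numbers, assuming sizeof(struct sca_block) == 2112 (a 64-byte header plus 64 entries of 32 bytes; the size is not stated in this diff):

	/* old: max offset 0x7f0 = 2032, and 2032 + 2112 = 4144 > 4096,
	 *      so offsets above 4096 - 2112 = 1984 straddled a page;
	 * new: offset wraps to 0 once offset + 2112 > 4096, so it cycles
	 *      0, 16, ..., 1984 and the SCA never crosses PAGE_SIZE. */
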
@@ -1911,6 +1895,22 @@ retry:
        return 0;
 }
 
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       preempt_disable();
+       kvm->arch.epoch = tod - get_tod_clock();
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+       kvm_s390_vcpu_unblock_all(kvm);
+       preempt_enable();
+       mutex_unlock(&kvm->lock);
+}
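
The helper above stacks three protections, summarized here as comments:

	/* - kvm->lock serializes concurrent TOD updates;
	 * - kvm_s390_vcpu_block_all() forces every vcpu out of SIE, so no
	 *   guest runs while the per-vcpu epochs are being rewritten;
	 * - preempt_disable() keeps get_tod_clock() and the epoch stores on
	 *   one host TOD reading, so all vcpus derive from the same instant. */

Factoring this out of the device-attribute handler also lets other paths (for instance a SET CLOCK intercept handler, if one exists in this tree) reuse it.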
+
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
@@ -2071,8 +2071,6 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
-       int rc = -1;
-
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
@@ -2080,40 +2078,35 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);
 
-       if (exit_reason >= 0) {
-               rc = 0;
+       memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+       if (vcpu->arch.sie_block->icptcode > 0) {
+               int rc = kvm_handle_sie_intercept(vcpu);
+
+               if (rc != -EOPNOTSUPP)
+                       return rc;
+               vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
+               vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+               vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
+               vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
+               return -EREMOTE;
+       } else if (exit_reason != -EFAULT) {
+               vcpu->stat.exit_null++;
+               return 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
-               rc = -EREMOTE;
-
+               return -EREMOTE;
        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
-               if (kvm_arch_setup_async_pf(vcpu)) {
-                       rc = 0;
-               } else {
-                       gpa_t gpa = current->thread.gmap_addr;
-                       rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
-               }
-       }
-
-       if (rc == -1)
-               rc = vcpu_post_run_fault_in_sie(vcpu);
-
-       memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
-
-       if (rc == 0) {
-               if (kvm_is_ucontrol(vcpu->kvm))
-                       /* Don't exit for host interrupts. */
-                       rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
-               else
-                       rc = kvm_handle_sie_intercept(vcpu);
+               if (kvm_arch_setup_async_pf(vcpu))
+                       return 0;
+               return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
        }
-
-       return rc;
+       return vcpu_post_run_fault_in_sie(vcpu);
 }
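
The rewrite makes each exit class return immediately instead of threading an rc == -1 sentinel through the function, and the gg14 memcpy now happens unconditionally up front. For context, a condensed sketch of the caller loop as it looks in this era (details elided, from memory rather than this diff):

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) &&
		 !guestdbg_exit_pending(vcpu) && !rc);

So 0 keeps the vcpu running, while -EREMOTE (with kvm_run already prepared) and any other nonzero rc drop back toward userspace.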
 
 static int __vcpu_run(struct kvm_vcpu *vcpu)
@@ -2233,18 +2226,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                rc = 0;
        }
 
-       if (rc == -EOPNOTSUPP) {
-               /* intercept cannot be handled in-kernel, prepare kvm-run */
-               kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
-               kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
-               kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
-               kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
-               rc = 0;
-       }
-
        if (rc == -EREMOTE) {
-               /* intercept was handled, but userspace support is needed
-                * kvm_run has been prepared by the handler */
+               /* userspace support is needed, kvm_run has been prepared */
                rc = 0;
        }
 
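With vcpu_post_run() now preparing kvm_run itself and returning -EREMOTE, -EOPNOTSUPP from kvm_handle_sie_intercept() can no longer reach this function, so the removed branch was dead code; summarized:

	/* old contract: -EOPNOTSUPP meant "intercept not handled in-kernel",
	 *   and the ioctl code filled kvm_run with KVM_EXIT_S390_SIEIC;
	 * new contract: vcpu_post_run() fills kvm_run and maps that case to
	 *   -EREMOTE, leaving -EREMOTE as the only exit-to-userspace code
	 *   handled here. */
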