KVM: s390: enable STFLE interpretation only if enabled for the guest
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2270fe4c8b71e7e4a16e3948d85fe15ab7369c21..b6a065403bdc81dce7041db598e50bed1227eb3d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -158,6 +158,8 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
                kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        vcpu->arch.sie_block->epoch -= *delta;
+                       if (vcpu->arch.cputm_enabled)
+                               vcpu->arch.cputm_start += *delta;
                }
        }
        return NOTIFY_OK;
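
The hunk above keeps the guest-visible CPU timer stable across a host TOD
clock sync. A minimal sketch of the invariant, using the field names
introduced further down (hypothetical helper, not part of the patch):

	static u64 guest_cputm_view(u64 host_tod, u64 cputm, u64 cputm_start)
	{
		/* host time elapsed since accounting started is charged to the guest */
		return cputm - (host_tod - cputm_start);
	}

When the TOD clock is adjusted by *delta, every subsequent
get_tod_clock_fast() result moves by *delta as well, so shifting
cputm_start by the same amount leaves the computed value unchanged.
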
@@ -274,7 +276,6 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;
 
-       down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
@@ -282,8 +283,10 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
 
                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
+               if (fatal_signal_pending(current))
+                       return;
+               cond_resched();
        }
-       up_read(&gmap->mm->mmap_sem);
 }
 
 /* Section: vm related */
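
The dirty-log walk above no longer holds mmap_sem across the whole memslot;
instead the loop bails out on a fatal signal and reschedules after every
page, which avoids soft lockups on huge memslots. The generic shape of such
a long, killable kernel loop (illustrative names, assuming <linux/sched.h>):

	static int visit_all_pages(unsigned long first, unsigned long last)
	{
		unsigned long gfn;

		for (gfn = first; gfn < last; gfn++) {
			visit_one_page(gfn);		/* hypothetical per-page work */
			if (fatal_signal_pending(current))
				return -EINTR;		/* the caller is being killed */
			cond_resched();			/* yield the CPU if necessary */
		}
		return 0;
	}
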
@@ -1428,6 +1431,93 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
+       raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+       vcpu->arch.cputm_start = get_tod_clock_fast();
+       raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
+       raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+       vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+       vcpu->arch.cputm_start = 0;
+       raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       WARN_ON_ONCE(vcpu->arch.cputm_enabled);
+       vcpu->arch.cputm_enabled = true;
+       __start_cpu_timer_accounting(vcpu);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
+       __stop_cpu_timer_accounting(vcpu);
+       vcpu->arch.cputm_enabled = false;
+}
+
+static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+       __enable_cpu_timer_accounting(vcpu);
+       preempt_enable();
+}
+
+static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+       preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+       __disable_cpu_timer_accounting(vcpu);
+       preempt_enable();
+}
+
+/* set the cpu timer - may only be called from the VCPU thread itself */
+void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
+{
+       preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+       raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+       if (vcpu->arch.cputm_enabled)
+               vcpu->arch.cputm_start = get_tod_clock_fast();
+       vcpu->arch.sie_block->cputm = cputm;
+       raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+       preempt_enable();
+}
+
+/* update and get the cpu timer - can also be called from other VCPU threads */
+__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
+{
+       unsigned int seq;
+       __u64 value;
+
+       if (unlikely(!vcpu->arch.cputm_enabled))
+               return vcpu->arch.sie_block->cputm;
+
+       preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+       do {
+               seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
+               /*
+                * If the writer would ever execute a read in the critical
+                * section, e.g. in irq context, we have a deadlock.
+                */
+               WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
+               value = vcpu->arch.sie_block->cputm;
+               /* if cputm_start is 0, accounting is being started/stopped */
+               if (likely(vcpu->arch.cputm_start))
+                       value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+       } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
+       preempt_enable();
+       return value;
+}
+
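
A note on the read side above: raw_read_seqcount() returns the current
count even while a write is in flight (odd count), and passing `seq & ~1`
to read_seqcount_retry() then forces one more loop iteration, so the reader
retries instead of spinning inside read_seqcount_begin(). The WARN_ON_ONCE
catches the only fatal case: a reader on the VCPU's own CPU that sees an
odd count has interrupted the writer and would otherwise retry forever.
For comparison, the conventional seqcount read pattern looks like this
(generic sketch, assuming <linux/seqlock.h>):

	unsigned int seq;
	u64 snapshot;

	do {
		seq = read_seqcount_begin(&sc);	/* spins while a write is active */
		snapshot = shared_value;	/* read the protected data */
	} while (read_seqcount_retry(&sc, seq));
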
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        /* Save host register state */
@@ -1448,10 +1538,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+       if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+               __start_cpu_timer_accounting(vcpu);
+       vcpu->cpu = cpu;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       vcpu->cpu = -1;
+       if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+               __stop_cpu_timer_accounting(vcpu);
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
 
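
Note the !is_vcpu_idle() checks: while the VCPU sits in enabled wait, the
architecture keeps the CPU timer running, so accounting is deliberately
left enabled across vcpu_put()/vcpu_load(). A sketch of the helper this
relies on (assumption: defined in kvm-s390.h along these lines):

	static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
	{
		/* CPUSTAT_WAIT is set while the VCPU is in enabled wait */
		return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_WAIT;
	}
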
@@ -1473,7 +1569,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
-       vcpu->arch.sie_block->cputm     = 0UL;
+       kvm_s390_set_cpu_timer(vcpu, 0);
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
@@ -1543,7 +1639,8 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
 
        vcpu->arch.cpu_id = model->cpu_id;
        vcpu->arch.sie_block->ibc = model->ibc;
-       vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+       if (test_kvm_facility(vcpu->kvm, 7))
+               vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
 }
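
This is the hunk the commit title refers to: facility 7 is STFLE, and only
if the guest has it does the SIE block get the pointer to the facility list
that interpreted STFLE execution will return; otherwise the field stays 0
and STFLE intercepts to the host. s390 facility bits are numbered from the
most significant bit, so a self-contained test looks like this (sketch; the
kernel's test_kvm_facility() additionally checks the facility mask):

	static inline int fac_test(const u64 *fac_list, unsigned int nr)
	{
		/* facility bit 0 is the MSB of the first doubleword */
		return (fac_list[nr >> 6] >> (63 - (nr & 63))) & 1;
	}
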
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
@@ -1621,6 +1718,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+       seqcount_init(&vcpu->arch.cputm_seqcount);
 
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
@@ -1720,7 +1818,7 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
-               r = put_user(vcpu->arch.sie_block->cputm,
+               r = put_user(kvm_s390_get_cpu_timer(vcpu),
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
@@ -1758,6 +1856,7 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
 {
        int r = -EINVAL;
+       __u64 val;
 
        switch (reg->id) {
        case KVM_REG_S390_TODPR:
@@ -1769,8 +1868,9 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
-               r = get_user(vcpu->arch.sie_block->cputm,
-                            (u64 __user *)reg->addr);
+               r = get_user(val, (u64 __user *)reg->addr);
+               if (!r)
+                       kvm_s390_set_cpu_timer(vcpu, val);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
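
Both register accessors now go through kvm_s390_get_cpu_timer() and
kvm_s390_set_cpu_timer(), since a direct load or store of sie_block->cputm
would bypass the seqcount protocol. Userspace is unaffected; the timer is
still transferred via the one-reg API (usage sketch, assuming an open vcpu
fd plus <linux/kvm.h> and <sys/ioctl.h>):

	struct kvm_one_reg reg;
	__u64 cputm = 0;	/* arbitrary example value */

	reg.id   = KVM_REG_S390_CPU_TIMER;
	reg.addr = (__u64)&cputm;
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		perror("KVM_SET_ONE_REG");
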
@@ -2163,8 +2263,10 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 {
-       psw_t *psw = &vcpu->arch.sie_block->gpsw;
-       u8 opcode;
+       struct kvm_s390_pgm_info pgm_info = {
+               .code = PGM_ADDRESSING,
+       };
+       u8 opcode, ilen;
        int rc;
 
        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
@@ -2178,12 +2280,21 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
-       rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
-       if (rc)
-               return kvm_s390_inject_prog_cond(vcpu, rc);
-       psw->addr = __rewind_psw(*psw, -insn_length(opcode));
-
-       return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       rc = read_guest_instr(vcpu, &opcode, 1);
+       ilen = insn_length(opcode);
+       if (rc < 0) {
+               return rc;
+       } else if (rc) {
+               /* Instruction-Fetching Exceptions - we can't detect the ilen.
+                * Forward by arbitrary ilc, injection will take care of
+                * nullification if necessary.
+                */
+               pgm_info = vcpu->arch.pgm;
+               ilen = 4;
+       }
+       pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
+       kvm_s390_forward_psw(vcpu, ilen);
+       return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
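
Forwarding the PSW requires the instruction length, which on s390 is
encoded in the two most significant bits of the first opcode byte. A
readable equivalent of insn_length() (sketch; the kernel computes the same
thing branchlessly):

	static inline unsigned int insn_len(u8 opcode)
	{
		switch (opcode >> 6) {
		case 0:
			return 2;	/* 00xxxxxx: 2-byte instruction */
		case 1:
		case 2:
			return 4;	/* 01xxxxxx, 10xxxxxx: 4-byte instruction */
		default:
			return 6;	/* 11xxxxxx: 6-byte instruction */
		}
	}
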
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@ -2249,10 +2360,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                 */
                local_irq_disable();
                __kvm_guest_enter();
+               __disable_cpu_timer_accounting(vcpu);
                local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                local_irq_disable();
+               __enable_cpu_timer_accounting(vcpu);
                __kvm_guest_exit();
                local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
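
While the VCPU runs inside SIE, the hardware itself steps the CPU timer in
the SIE block, so software accounting is switched off exactly around
sie64a() and only covers host-side work. Together with the enable/disable
pair added to kvm_arch_vcpu_ioctl_run() below, the accounting windows look
like this (sketch):

	enable_cpu_timer_accounting(vcpu);	/* KVM_RUN entry            */
	...					/* host work, accounted     */
	__disable_cpu_timer_accounting(vcpu);	/* just before entering SIE */
	exit_reason = sie64a(...);		/* hardware steps cputm     */
	__enable_cpu_timer_accounting(vcpu);	/* just after leaving SIE   */
	...					/* host work, accounted     */
	disable_cpu_timer_accounting(vcpu);	/* KVM_RUN exit             */
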
@@ -2276,7 +2389,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-               vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
+               kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
@@ -2298,7 +2411,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
-       kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
+       kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
@@ -2330,6 +2443,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        sync_regs(vcpu, kvm_run);
+       enable_cpu_timer_accounting(vcpu);
 
        might_fault();
        rc = __vcpu_run(vcpu);
@@ -2349,6 +2463,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                rc = 0;
        }
 
+       disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
 
        if (vcpu->sigset_active)
@@ -2369,7 +2484,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
        unsigned char archmode = 1;
        freg_t fprs[NUM_FPRS];
        unsigned int px;
-       u64 clkcomp;
+       u64 clkcomp, cputm;
        int rc;
 
        px = kvm_s390_get_prefix(vcpu);
@@ -2403,8 +2518,9 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
                              &vcpu->run->s.regs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
+       cputm = kvm_s390_get_cpu_timer(vcpu);
        rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
-                             &vcpu->arch.sie_block->cputm, 8);
+                             &cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
                              &clkcomp, 8);
@@ -2610,7 +2726,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
        switch (mop->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-                       r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+                       r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+                                           mop->size, GACC_FETCH);
                        break;
                }
                r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
@@ -2621,7 +2738,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-                       r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+                       r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+                                           mop->size, GACC_STORE);
                        break;
                }
                if (copy_from_user(tmpbuf, uaddr, mop->size)) {
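
check_gva_range() no longer takes a bool for write access: the same series
replaces it with an access-mode enum in gaccess.h so that instruction
fetches (as used by read_guest_instr() above) can be distinguished from
data fetches. Presumably of this shape:

	enum gacc_mode {
		GACC_FETCH,	/* data fetch */
		GACC_IFETCH,	/* instruction fetch */
		GACC_STORE,	/* store access */
	};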