KVM: PPC: Book3S HV: Add H_SET_MODE hcall handling
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index aba05bbb3e744b6e516afbe9093923793762040d..7db9df2ac211b34c7ace4b8d040bcc58efcf8396 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -67,6 +67,8 @@
 /* Used as a "null" value for timebase values */
 #define TB_NIL (~(u64)0)
 
+static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
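sPAPR hcall numbers are multiples of 4 (H_REMOVE is 0x04, H_ENTER is 0x08, and so on), so an opcode divided by 4 gives a dense bit index; that is why the bitmap is sized MAX_HCALL_OPCODE/4 + 1. A minimal sketch of the lookup this enables, using the per-VM enabled_hcalls bitmap that appears later in this diff (illustrative only, not part of the patch):

        /* Illustrative sketch: one bit per hcall, indexed by opcode/4. */
        static inline bool hcall_enabled(struct kvm *kvm, unsigned long opcode)
        {
                return opcode <= MAX_HCALL_OPCODE &&
                       test_bit(opcode / 4, kvm->arch.enabled_hcalls);
        }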
@@ -555,6 +557,48 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        vcpu->arch.dtl.dirty = true;
 }
 
+static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+               return true;
+       if ((!vcpu->arch.vcore->arch_compat) &&
+           cpu_has_feature(CPU_FTR_ARCH_207S))
+               return true;
+       return false;
+}
+
+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+                            unsigned long resource, unsigned long value1,
+                            unsigned long value2)
+{
+       switch (resource) {
+       case H_SET_MODE_RESOURCE_SET_CIABR:
+               if (!kvmppc_power8_compatible(vcpu))
+                       return H_P2;
+               if (value2)
+                       return H_P4;
+               if (mflags)
+                       return H_UNSUPPORTED_FLAG_START;
+               /* Guests can't breakpoint the hypervisor */
+               if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
+                       return H_P3;
+               vcpu->arch.ciabr  = value1;
+               return H_SUCCESS;
+       case H_SET_MODE_RESOURCE_SET_DAWR:
+               if (!kvmppc_power8_compatible(vcpu))
+                       return H_P2;
+               if (mflags)
+                       return H_UNSUPPORTED_FLAG_START;
+               if (value2 & DABRX_HYP)
+                       return H_P4;
+               vcpu->arch.dawr  = value1;
+               vcpu->arch.dawrx = value2;
+               return H_SUCCESS;
+       default:
+               return H_TOO_HARD;
+       }
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
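kvmppc_h_set_mode() above is reached when the guest issues H_SET_MODE with the flags, resource, and values in GPRs 4-7. For context, a pseries guest would make the call through the usual hcall wrapper; a minimal guest-side sketch, assuming the standard plpar_hcall_norets() interface (the guest wrapper is not part of this patch):

        /* Guest-side sketch: program a CIABR instruction breakpoint.
         * mflags must be 0 and value2 must be 0 for SET_CIABR, per the
         * checks in kvmppc_h_set_mode() above.
         */
        static long guest_set_ciabr(unsigned long ciabr)
        {
                return plpar_hcall_norets(H_SET_MODE, 0 /* mflags */,
                                          H_SET_MODE_RESOURCE_SET_CIABR,
                                          ciabr, 0 /* value2 */);
        }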
@@ -562,6 +606,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        struct kvm_vcpu *tvcpu;
        int idx, rc;
 
+       if (req <= MAX_HCALL_OPCODE &&
+           !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
+               return RESUME_HOST;
+
        switch (req) {
        case H_ENTER:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
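This is the gating check: an in-range hcall whose bit is clear in the per-VM enabled_hcalls bitmap is not handled in the kernel at all. Returning RESUME_HOST lets the exit, already marked as a PAPR hcall exit with the hcall number and arguments filled in, surface to userspace, which can emulate the hcall itself and supply the return value. A hedged sketch of the userspace side inside the vcpu run loop (emulate_hcall() is hypothetical; assumes <linux/kvm.h>):

        /* Userspace sketch: run points at the mmap'ed struct kvm_run. */
        if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
                uint64_t nr = run->papr_hcall.nr;      /* hcall # (GPR3) */
                uint64_t *args = run->papr_hcall.args; /* GPR4..GPR12 */

                run->papr_hcall.ret = emulate_hcall(nr, args);
                /* then re-enter the guest with ioctl(vcpu_fd, KVM_RUN, 0) */
        }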
@@ -620,7 +668,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
                /* Send the error out to userspace via KVM_RUN */
                return rc;
-
+       case H_SET_MODE:
+               ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6),
+                                       kvmppc_get_gpr(vcpu, 7));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
@@ -639,6 +694,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        return RESUME_GUEST;
 }
 
+static int kvmppc_hcall_impl_hv(unsigned long cmd)
+{
+       switch (cmd) {
+       case H_CEDE:
+       case H_PROD:
+       case H_CONFER:
+       case H_REGISTER_VPA:
+       case H_SET_MODE:
+#ifdef CONFIG_KVM_XICS
+       case H_XIRR:
+       case H_CPPR:
+       case H_EOI:
+       case H_IPI:
+       case H_IPOLL:
+       case H_XIRR_X:
+#endif
+               return 1;
+       }
+
+       /* See if it's in the real-mode table */
+       return kvmppc_hcall_impl_hv_realmode(cmd);
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                 struct task_struct *tsk)
 {
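kvmppc_hcall_impl_hv() answers "does the kernel implement this hcall?": it lists the calls handled here in virtual mode and falls back to the real-mode table for the rest. The enable/disable facility can then refuse to toggle hcalls that are not implemented at all. A hedged sketch of that consumer (its shape is assumed from the companion enable-cap patch, which is not part of this diff):

        /* Assumed consumer: only implemented, in-range, 4-aligned hcall
         * numbers may be enabled or disabled.
         */
        static int kvm_vm_ioctl_enable_hcall(struct kvm *kvm,
                                             unsigned long hcall, bool enable)
        {
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    !kvm->arch.kvm_ops->hcall_implemented(hcall))
                        return -EINVAL;
                if (enable)
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                return 0;
        }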
@@ -894,12 +972,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_CIABR:
                *val = get_reg_val(id, vcpu->arch.ciabr);
                break;
-       case KVM_REG_PPC_IC:
-               *val = get_reg_val(id, vcpu->arch.ic);
-               break;
-       case KVM_REG_PPC_VTB:
-               *val = get_reg_val(id, vcpu->arch.vtb);
-               break;
        case KVM_REG_PPC_CSIGR:
                *val = get_reg_val(id, vcpu->arch.csigr);
                break;
@@ -1094,12 +1166,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
                        vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
                break;
-       case KVM_REG_PPC_IC:
-               vcpu->arch.ic = set_reg_val(id, *val);
-               break;
-       case KVM_REG_PPC_VTB:
-               vcpu->arch.vtb = set_reg_val(id, *val);
-               break;
        case KVM_REG_PPC_CSIGR:
                vcpu->arch.csigr = set_reg_val(id, *val);
                break;
@@ -1236,7 +1302,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
        int core;
        struct kvmppc_vcore *vcore;
 
-       core = id / threads_per_core;
+       core = id / threads_per_subcore;
        if (core >= KVM_MAX_VCORES)
                goto out;
 
@@ -1286,7 +1352,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                        init_waitqueue_head(&vcore->wq);
                        vcore->preempt_tb = TB_NIL;
                        vcore->lpcr = kvm->arch.lpcr;
-                       vcore->first_vcpuid = core * threads_per_core;
+                       vcore->first_vcpuid = core * threads_per_subcore;
                        vcore->kvm = kvm;
                }
                kvm->arch.vcores[core] = vcore;
@@ -1476,16 +1542,19 @@ static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
 static int on_primary_thread(void)
 {
        int cpu = smp_processor_id();
-       int thr = cpu_thread_in_core(cpu);
+       int thr;
 
-       if (thr)
+       /* Are we the primary thread of our subcore? */
+       if (cpu_thread_in_subcore(cpu))
                return 0;
-       while (++thr < threads_per_core)
+
+       thr = 0;
+       while (++thr < threads_per_subcore)
                if (cpu_online(cpu + thr))
                        return 0;
 
        /* Grab all hw threads so they can't go into the kernel */
-       for (thr = 1; thr < threads_per_core; ++thr) {
+       for (thr = 1; thr < threads_per_subcore; ++thr) {
                if (kvmppc_grab_hwthread(cpu + thr)) {
                        /* Couldn't grab one; let the others go */
                        do {
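The threads_per_core to threads_per_subcore changes account for POWER8 split-core mode, where a physical core can be divided into subcores and a guest must not span more than one subcore. cpu_thread_in_subcore() reports a CPU's thread index within its subcore; conceptually (threads_per_subcore is a power of two), the asm/cputhreads.h helper amounts to:

        /* Conceptual form of the cputhreads.h helper: nonzero means we
         * are a secondary thread of the subcore.
         */
        static inline int cpu_thread_in_subcore(int cpu)
        {
                return cpu & (threads_per_subcore - 1);
        }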
@@ -1544,15 +1613,18 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
        }
 
        /*
-        * Make sure we are running on thread 0, and that
-        * secondary threads are offline.
+        * Make sure we are running on primary threads, and that secondary
+        * threads are offline.  Also check that the number of threads in
+        * this guest is not greater than the number of threads per subcore.
         */
-       if (threads_per_core > 1 && !on_primary_thread()) {
+       if ((threads_per_core > 1) &&
+           ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
                list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                        vcpu->arch.ret = -EBUSY;
                goto out;
        }
 
+
        vc->pcpu = smp_processor_id();
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
@@ -1580,7 +1652,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
        /* wait for secondary threads to finish writing their state to memory */
        if (vc->nap_count < vc->n_woken)
                kvmppc_wait_for_nap(vc);
-       for (i = 0; i < threads_per_core; ++i)
+       for (i = 0; i < threads_per_subcore; ++i)
                kvmppc_release_hwthread(vc->pcpu + i);
        /* prevent other vcpu threads from doing kvmppc_start_thread() now */
        vc->vcore_state = VCORE_EXITING;
@@ -2275,6 +2347,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
         */
        cpumask_setall(&kvm->arch.need_tlb_flush);
 
+       /* Start out with the default set of hcalls enabled */
+       memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
+              sizeof(kvm->arch.enabled_hcalls));
+
        kvm->arch.rma = NULL;
 
        kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2305,10 +2381,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
        spin_lock_init(&kvm->arch.slot_phys_lock);
 
        /*
-        * Don't allow secondary CPU threads to come online
-        * while any KVM VMs exist.
+        * Track that we now have an HV mode VM active. This blocks secondary
+        * CPU threads from coming online.
         */
-       inhibit_secondary_onlining();
+       kvm_hv_vm_activated();
 
        return 0;
 }
@@ -2324,7 +2400,7 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 
 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
-       uninhibit_secondary_onlining();
+       kvm_hv_vm_deactivated();
 
        kvmppc_free_vcores(kvm);
        if (kvm->arch.rma) {
@@ -2413,6 +2489,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
        return r;
 }
 
+/*
+ * List of hcall numbers to enable by default.
+ * For compatibility with old userspace, we enable by default
+ * all hcalls that were implemented before the hcall-enabling
+ * facility was added.  Note this list should not include H_RTAS.
+ */
+static unsigned int default_hcall_list[] = {
+       H_REMOVE,
+       H_ENTER,
+       H_READ,
+       H_PROTECT,
+       H_BULK_REMOVE,
+       H_GET_TCE,
+       H_PUT_TCE,
+       H_SET_DABR,
+       H_SET_XDABR,
+       H_CEDE,
+       H_PROD,
+       H_CONFER,
+       H_REGISTER_VPA,
+#ifdef CONFIG_KVM_XICS
+       H_EOI,
+       H_CPPR,
+       H_IPI,
+       H_IPOLL,
+       H_XIRR,
+       H_XIRR_X,
+#endif
+       0
+};
+
+static void init_default_hcalls(void)
+{
+       int i;
+       unsigned int hcall;
+
+       for (i = 0; default_hcall_list[i]; ++i) {
+               hcall = default_hcall_list[i];
+               WARN_ON(!kvmppc_hcall_impl_hv(hcall));
+               __set_bit(hcall / 4, default_enabled_hcalls);
+       }
+}
+
 static struct kvmppc_ops kvm_ops_hv = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
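Enabling these hcalls by default keeps existing userspace working unchanged; newer userspace can then opt individual hcalls in or out per VM. A hedged userspace sketch using the companion enable-cap interface (KVM_CAP_PPC_ENABLE_HCALL; the args layout, hcall number in args[0] and 0/1 for disable/enable in args[1], is assumed from that patch; needs <linux/kvm.h> and <sys/ioctl.h>):

        /* Userspace sketch: route H_SET_MODE to userspace for this VM. */
        static int disable_h_set_mode(int vm_fd)
        {
                struct kvm_enable_cap cap = {
                        .cap  = KVM_CAP_PPC_ENABLE_HCALL,
                        .args = { 0x31C /* H_SET_MODE */, 0 /* disable */ },
                };

                return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
        }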
@@ -2445,6 +2564,7 @@ static struct kvmppc_ops kvm_ops_hv = {
        .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
        .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
        .arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
+       .hcall_implemented = kvmppc_hcall_impl_hv,
 };
 
 static int kvmppc_book3s_init_hv(void)
@@ -2460,6 +2580,8 @@ static int kvmppc_book3s_init_hv(void)
        kvm_ops_hv.owner = THIS_MODULE;
        kvmppc_hv_ops = &kvm_ops_hv;
 
+       init_default_hcalls();
+
        r = kvmppc_mmu_hv_init();
        return r;
 }