KVM: s390: add cpu model support
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
index 6b049ee75a5694d74cce4f6402b92ae7879f8c4e..0c362392756310e1ce0e2da8e35c6bedb5cf0849 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <asm/asm-offsets.h>
@@ -29,7 +30,6 @@
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
-#include <asm/facility.h>
 #include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -50,6 +50,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+       { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -81,25 +82,37 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+       { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
+       { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+       { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
+       { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+       { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
+       { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
+       { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
 };
 
-unsigned long *vfacilities;
-static struct gmap_notifier gmap_notifier;
+/* upper facilities limit for kvm */
+unsigned long kvm_s390_fac_list_mask[] = {
+       0xff82fffbf4fc2000UL,
+       0x005c000000000000UL,
+};
 
-/* test availability of vfacility */
-int test_vfacility(unsigned long nr)
+unsigned long kvm_s390_fac_list_mask_size(void)
 {
-       return __test_facility(nr, (void *) vfacilities);
+       BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
+       return ARRAY_SIZE(kvm_s390_fac_list_mask);
 }
 
+static struct gmap_notifier gmap_notifier;
+
 /* Section: not file related */
 int kvm_arch_hardware_enable(void)
 {
@@ -159,6 +172,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
+       case KVM_CAP_S390_USER_SIGP:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
@@ -247,6 +261,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
+       case KVM_CAP_S390_USER_SIGP:
+               kvm->arch.user_sigp = 1;
+               r = 0;
+               break;
        default:
                r = -EINVAL;
                break;
@@ -254,7 +272,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
        return r;
 }
 
-static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       int ret;
+
+       switch (attr->attr) {
+       case KVM_S390_VM_MEM_LIMIT_SIZE:
+               ret = 0;
+               if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+                       ret = -EFAULT;
+               break;
+       default:
+               ret = -ENXIO;
+               break;
+       }
+       return ret;
+}
+
+static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 {
        int ret;
        unsigned int idx;
@@ -276,6 +311,142 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
+       case KVM_S390_VM_MEM_LIMIT_SIZE: {
+               unsigned long new_limit;
+
+               if (kvm_is_ucontrol(kvm))
+                       return -EINVAL;
+
+               if (get_user(new_limit, (u64 __user *)attr->addr))
+                       return -EFAULT;
+
+               if (new_limit > kvm->arch.gmap->asce_end)
+                       return -E2BIG;
+
+               ret = -EBUSY;
+               mutex_lock(&kvm->lock);
+               if (atomic_read(&kvm->online_vcpus) == 0) {
+                       /* gmap_alloc will round the limit up */
+                       struct gmap *new = gmap_alloc(current->mm, new_limit);
+
+                       if (!new) {
+                               ret = -ENOMEM;
+                       } else {
+                               gmap_free(kvm->arch.gmap);
+                               new->private = kvm;
+                               kvm->arch.gmap = new;
+                               ret = 0;
+                       }
+               }
+               mutex_unlock(&kvm->lock);
+               break;
+       }
+       default:
+               ret = -ENXIO;
+               break;
+       }
+       return ret;
+}
+
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       if (!test_kvm_facility(kvm, 76))
+               return -EINVAL;
+
+       mutex_lock(&kvm->lock);
+       switch (attr->attr) {
+       case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+               get_random_bytes(
+                       kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+               kvm->arch.crypto.aes_kw = 1;
+               break;
+       case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+               get_random_bytes(
+                       kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+               kvm->arch.crypto.dea_kw = 1;
+               break;
+       case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+               kvm->arch.crypto.aes_kw = 0;
+               memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
+                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+               break;
+       case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+               kvm->arch.crypto.dea_kw = 0;
+               memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
+                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+               break;
+       default:
+               mutex_unlock(&kvm->lock);
+               return -ENXIO;
+       }
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_s390_vcpu_crypto_setup(vcpu);
+               exit_sie(vcpu);
+       }
+       mutex_unlock(&kvm->lock);
+       return 0;
+}
+
+static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       u8 gtod_high;
+
+       if (copy_from_user(&gtod_high, (void __user *)attr->addr,
+                                          sizeof(gtod_high)))
+               return -EFAULT;
+
+       if (gtod_high != 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_vcpu *cur_vcpu;
+       unsigned int vcpu_idx;
+       u64 host_tod, gtod;
+       int r;
+
+       if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+               return -EFAULT;
+
+       r = store_tod_clock(&host_tod);
+       if (r)
+               return r;
+
+       mutex_lock(&kvm->lock);
+       kvm->arch.epoch = gtod - host_tod;
+       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+               cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+               exit_sie(cur_vcpu);
+       }
+       mutex_unlock(&kvm->lock);
+       return 0;
+}
+
+static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       int ret;
+
+       if (attr->flags)
+               return -EINVAL;
+
+       switch (attr->attr) {
+       case KVM_S390_VM_TOD_HIGH:
+               ret = kvm_s390_set_tod_high(kvm, attr);
+               break;
+       case KVM_S390_VM_TOD_LOW:
+               ret = kvm_s390_set_tod_low(kvm, attr);
+               break;
        default:
                ret = -ENXIO;
                break;
@@ -283,13 +454,170 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
        return ret;
 }
 
+static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       u8 gtod_high = 0;
+
+       if (copy_to_user((void __user *)attr->addr, &gtod_high,
+                                        sizeof(gtod_high)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       u64 host_tod, gtod;
+       int r;
+
+       r = store_tod_clock(&host_tod);
+       if (r)
+               return r;
+
+       gtod = host_tod + kvm->arch.epoch;
+       if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       int ret;
+
+       if (attr->flags)
+               return -EINVAL;
+
+       switch (attr->attr) {
+       case KVM_S390_VM_TOD_HIGH:
+               ret = kvm_s390_get_tod_high(kvm, attr);
+               break;
+       case KVM_S390_VM_TOD_LOW:
+               ret = kvm_s390_get_tod_low(kvm, attr);
+               break;
+       default:
+               ret = -ENXIO;
+               break;
+       }
+       return ret;
+}
+
+static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_s390_vm_cpu_processor *proc;
+       int ret = 0;
+
+       mutex_lock(&kvm->lock);
+       if (atomic_read(&kvm->online_vcpus)) {
+               ret = -EBUSY;
+               goto out;
+       }
+       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+       if (!proc) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       if (!copy_from_user(proc, (void __user *)attr->addr,
+                           sizeof(*proc))) {
+               memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
+                      sizeof(struct cpuid));
+               kvm->arch.model.ibc = proc->ibc;
+               memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+                      S390_ARCH_FAC_LIST_SIZE_BYTE);
+       } else
+               ret = -EFAULT;
+       kfree(proc);
+out:
+       mutex_unlock(&kvm->lock);
+       return ret;
+}
+
+static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->attr) {
+       case KVM_S390_VM_CPU_PROCESSOR:
+               ret = kvm_s390_set_processor(kvm, attr);
+               break;
+       }
+       return ret;
+}
+
+static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_s390_vm_cpu_processor *proc;
+       int ret = 0;
+
+       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+       if (!proc) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
+       proc->ibc = kvm->arch.model.ibc;
+       memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+       if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
+               ret = -EFAULT;
+       kfree(proc);
+out:
+       return ret;
+}
+
+static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_s390_vm_cpu_machine *mach;
+       int ret = 0;
+
+       mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+       if (!mach) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       get_cpu_id((struct cpuid *) &mach->cpuid);
+       mach->ibc = sclp_get_ibc();
+       memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
+              kvm_s390_fac_list_mask_size() * sizeof(u64));
+       /* memcpy takes a byte count: _U64 is an element count (256), which
+        * would truncate the 2 KB facility list to 256 bytes */
+       memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
+       if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+               ret = -EFAULT;
+       kfree(mach);
+out:
+       return ret;
+}
+
+static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->attr) {
+       case KVM_S390_VM_CPU_PROCESSOR:
+               ret = kvm_s390_get_processor(kvm, attr);
+               break;
+       case KVM_S390_VM_CPU_MACHINE:
+               ret = kvm_s390_get_machine(kvm, attr);
+               break;
+       }
+       return ret;
+}
+
 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
        int ret;
 
        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
-               ret = kvm_s390_mem_control(kvm, attr);
+               ret = kvm_s390_set_mem_control(kvm, attr);
+               break;
+       case KVM_S390_VM_TOD:
+               ret = kvm_s390_set_tod(kvm, attr);
+               break;
+       case KVM_S390_VM_CPU_MODEL:
+               ret = kvm_s390_set_cpu_model(kvm, attr);
+               break;
+       case KVM_S390_VM_CRYPTO:
+               ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
@@ -301,7 +629,24 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       return -ENXIO;
+       int ret;
+
+       switch (attr->group) {
+       case KVM_S390_VM_MEM_CTRL:
+               ret = kvm_s390_get_mem_control(kvm, attr);
+               break;
+       case KVM_S390_VM_TOD:
+               ret = kvm_s390_get_tod(kvm, attr);
+               break;
+       case KVM_S390_VM_CPU_MODEL:
+               ret = kvm_s390_get_cpu_model(kvm, attr);
+               break;
+       default:
+               ret = -ENXIO;
+               break;
+       }
+
+       return ret;
 }
 
 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -313,6 +658,42 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
+               case KVM_S390_VM_MEM_LIMIT_SIZE:
+                       ret = 0;
+                       break;
+               default:
+                       ret = -ENXIO;
+                       break;
+               }
+               break;
+       case KVM_S390_VM_TOD:
+               switch (attr->attr) {
+               case KVM_S390_VM_TOD_LOW:
+               case KVM_S390_VM_TOD_HIGH:
+                       ret = 0;
+                       break;
+               default:
+                       ret = -ENXIO;
+                       break;
+               }
+               break;
+       case KVM_S390_VM_CPU_MODEL:
+               switch (attr->attr) {
+               case KVM_S390_VM_CPU_PROCESSOR:
+               case KVM_S390_VM_CPU_MACHINE:
+                       ret = 0;
+                       break;
+               default:
+                       ret = -ENXIO;
+                       break;
+               }
+               break;
+       case KVM_S390_VM_CRYPTO:
+               switch (attr->attr) {
+               case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+               case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+               case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+               case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
@@ -394,9 +775,61 @@ long kvm_arch_vm_ioctl(struct file *filp,
        return r;
 }
 
+static int kvm_s390_query_ap_config(u8 *config)
+{
+       u32 fcn_code = 0x04000000UL;
+       u32 cc;
+
+       asm volatile(
+               "lgr 0,%1\n"
+               "lgr 2,%2\n"
+               ".long 0xb2af0000\n"            /* PQAP(QCI) */
+               "ipm %0\n"
+               "srl %0,28\n"
+               : "=r" (cc)
+               : "r" (fcn_code), "r" (config)
+               : "cc", "0", "2", "memory"
+       );
+
+       return cc;
+}
+
+static int kvm_s390_apxa_installed(void)
+{
+       u8 config[128];
+       int cc;
+
+       if (test_facility(2) && test_facility(12)) {
+               cc = kvm_s390_query_ap_config(config);
+
+               if (cc)
+                       pr_err("PQAP(QCI) failed with cc=%d", cc);
+               else
+                       return config[0] & 0x40;
+       }
+
+       return 0;
+}
+
+static void kvm_s390_set_crycb_format(struct kvm *kvm)
+{
+       kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+
+       if (kvm_s390_apxa_installed())
+               kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
+       else
+               kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
+}
+
+static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
+{
+       get_cpu_id(cpu_id);
+       cpu_id->version = 0xff;
+}
+
 static int kvm_s390_crypto_init(struct kvm *kvm)
 {
-       if (!test_vfacility(76))
+       if (!test_kvm_facility(kvm, 76))
                return 0;
 
        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
@@ -404,15 +837,18 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;
 
-       kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
-                                 CRYCB_FORMAT1;
+       kvm_s390_set_crycb_format(kvm);
+
+       /* Disable AES/DEA protected key functions by default */
+       kvm->arch.crypto.aes_kw = 0;
+       kvm->arch.crypto.dea_kw = 0;
 
        return 0;
 }
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-       int rc;
+       int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;
 
@@ -447,12 +883,53 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (!kvm->arch.dbf)
                goto out_nodbf;
 
+       /*
+        * The architectural maximum amount of facilities is 16 kbit. To store
+        * this amount, 2 kbyte of memory is required. Thus we need a full
+        * page to hold the active copy (arch.model.fac->sie) and the current
+        * facilities set (arch.model.fac->kvm). Its address size has to be
+        * 31 bits and word aligned.
+        */
+       kvm->arch.model.fac =
+               (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+       if (!kvm->arch.model.fac)
+               goto out_nofac;
+
+       memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
+
+       /*
+        * If this KVM host runs *not* in a LPAR, relax the facility bits
+        * of the kvm facility mask by all missing facilities. This will allow
+        * to determine the right CPU model by means of the remaining facilities.
+        * Live guest migration must prohibit the migration of KVMs running in
+        * a LPAR to non LPAR hosts.
+        */
+       if (!MACHINE_IS_LPAR)
+               for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
+                       kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
+
+       /*
+        * Apply the kvm facility mask to limit the kvm supported/tolerated
+        * facility list.
+        */
+       for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+               if (i < kvm_s390_fac_list_mask_size())
+                       kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+               else
+                       kvm->arch.model.fac->kvm[i] = 0UL;
+       }
+
+       kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
+       kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
+
        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_crypto;
 
        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
+       mutex_init(&kvm->arch.ipte_mutex);
 
        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");
@@ -469,6 +946,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
+       kvm->arch.epoch = 0;
 
        spin_lock_init(&kvm->arch.start_stop_lock);
 
@@ -476,6 +954,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_nogmap:
        kfree(kvm->arch.crypto.crycb);
 out_crypto:
+       free_page((unsigned long)kvm->arch.model.fac);
+out_nofac:
        debug_unregister(kvm->arch.dbf);
 out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
@@ -528,6 +1008,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        kvm_free_vcpus(kvm);
+       free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
@@ -538,25 +1019,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 }
 
 /* Section: vcpu related */
+static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+       if (!vcpu->arch.gmap)
+               return -ENOMEM;
+       vcpu->arch.gmap->private = vcpu->kvm;
+
+       return 0;
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
-       if (kvm_is_ucontrol(vcpu->kvm)) {
-               vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
-               if (!vcpu->arch.gmap)
-                       return -ENOMEM;
-               vcpu->arch.gmap->private = vcpu->kvm;
-               return 0;
-       }
-
-       vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
+
+       if (kvm_is_ucontrol(vcpu->kvm))
+               return __kvm_ucontrol_vcpu_init(vcpu);
+
        return 0;
 }
 
@@ -607,16 +1093,27 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        kvm_s390_clear_local_irqs(vcpu);
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-       return 0;
+       mutex_lock(&vcpu->kvm->lock);
+       vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+       mutex_unlock(&vcpu->kvm->lock);
+       if (!kvm_is_ucontrol(vcpu->kvm))
+               vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
-       if (!test_vfacility(76))
+       if (!test_kvm_facility(vcpu->kvm, 76))
                return;
 
+       vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+
+       if (vcpu->kvm->arch.crypto.aes_kw)
+               vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+       if (vcpu->kvm->arch.crypto.dea_kw)
+               vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
+
        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 }
 
@@ -646,14 +1143,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
-       if (test_vfacility(50) && test_vfacility(73))
+       if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;
 
        vcpu->arch.sie_block->ecb2  = 8;
-       vcpu->arch.sie_block->eca   = 0xD1002000U;
+       vcpu->arch.sie_block->eca   = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
-       vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
+       if (sclp_has_sigpif())
+               vcpu->arch.sie_block->eca |= 0x10000000U;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
                                      ICTL_TPROT;
 
@@ -662,10 +1160,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
                if (rc)
                        return rc;
        }
-       hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
-       get_cpu_id(&vcpu->arch.cpu_id);
-       vcpu->arch.cpu_id.version = 0xff;
+
+       mutex_lock(&vcpu->kvm->lock);
+       vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
+       memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
+       vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
+       mutex_unlock(&vcpu->kvm->lock);
 
        kvm_s390_vcpu_crypto_setup(vcpu);
 
@@ -709,9 +1212,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }
+       vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
 
        spin_lock_init(&vcpu->arch.local_int.lock);
-       INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
@@ -734,7 +1237,7 @@ out:
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-       return kvm_cpu_has_interrupt(vcpu);
+       return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
 void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -862,6 +1365,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
+               if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+                       kvm_clear_async_pf_completion_queue(vcpu);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
@@ -1114,13 +1619,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
 {
        struct kvm_s390_interrupt inti;
-       inti.parm64 = token;
+       struct kvm_s390_irq irq;
 
        if (start_token) {
-               inti.type = KVM_S390_INT_PFAULT_INIT;
-               WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+               irq.u.ext.ext_params2 = token;
+               irq.type = KVM_S390_INT_PFAULT_INIT;
+               WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
+               inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
 }
@@ -1167,7 +1674,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
-       if (kvm_cpu_has_interrupt(vcpu))
+       if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
@@ -1332,6 +1839,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+               if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+                       kvm_clear_async_pf_completion_queue(vcpu);
        }
        kvm_run->kvm_dirty_regs = 0;
 }
@@ -1550,15 +2059,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
-       /* Need to lock access to action_bits to avoid a SIGP race condition */
-       spin_lock(&vcpu->arch.local_int.lock);
-       atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
        /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
-       vcpu->arch.local_int.action_bits &=
-                                ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-       spin_unlock(&vcpu->arch.local_int.lock);
+       kvm_s390_clear_stop_irq(vcpu);
 
+       atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        __disable_ibs_on_vcpu(vcpu);
 
        for (i = 0; i < online_vcpus; i++) {
@@ -1614,11 +2118,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
+               struct kvm_s390_irq s390irq;
 
                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
-               r = kvm_s390_inject_vcpu(vcpu, &s390int);
+               if (s390int_to_s390irq(&s390int, &s390irq))
+                       return -EINVAL;
+               r = kvm_s390_inject_vcpu(vcpu, &s390irq);
                break;
        }
        case KVM_S390_STORE_STATUS:
@@ -1771,30 +2278,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 static int __init kvm_s390_init(void)
 {
-       int ret;
-       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-       if (ret)
-               return ret;
-
-       /*
-        * guests can ask for up to 255+1 double words, we need a full page
-        * to hold the maximum amount of facilities. On the other hand, we
-        * only set facilities that are known to work in KVM.
-        */
-       vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
-       if (!vfacilities) {
-               kvm_exit();
-               return -ENOMEM;
-       }
-       memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
-       vfacilities[0] &= 0xff82fffbf47c2000UL;
-       vfacilities[1] &= 0x005c000000000000UL;
-       return 0;
+       return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
 
 static void __exit kvm_s390_exit(void)
 {
-       free_page((unsigned long) vfacilities);
        kvm_exit();
 }
 
This page took 0.035388 seconds and 5 git commands to generate.