28 files changed, 2644 insertions(+), 2423 deletions(-)
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst
index fa651e25d98c..87c6f4a6ca32 100644
--- a/Documentation/PCI/pci.rst
+++ b/Documentation/PCI/pci.rst
@@ -103,6 +103,7 @@ need pass only as many optional fields as necessary:
   - subvendor and subdevice fields default to PCI_ANY_ID (FFFFFFFF)
   - class and classmask fields default to 0
   - driver_data defaults to 0UL.
+  - override_only field defaults to 0.
 
 Note that driver_data must match the value used by any of the pci_device_id
 entries defined in the driver. This makes the driver_data field mandatory
diff --git a/MAINTAINERS b/MAINTAINERS
index c9467d2839f5..7f0fcaa8ee67 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19466,6 +19466,7 @@ T:      git git://github.com/awilliam/linux-vfio.git
 F:      Documentation/driver-api/vfio.rst
 F:      drivers/vfio/
 F:      include/linux/vfio.h
+F:      include/linux/vfio_pci_core.h
 F:      include/uapi/linux/vfio.h
 
 VFIO FSL-MC DRIVER
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 161a9e12bfb8..d681ae462350 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -798,14 +798,12 @@ struct kvm_s390_cpu_model {
         unsigned short ibc;
 };
 
-struct kvm_s390_module_hook {
-        int (*hook)(struct kvm_vcpu *vcpu);
-        struct module *owner;
-};
+typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);
 
 struct kvm_s390_crypto {
         struct kvm_s390_crypto_cb *crycb;
-        struct kvm_s390_module_hook *pqap_hook;
+        struct rw_semaphore pqap_hook_rwsem;
+        crypto_hook *pqap_hook;
         __u32 crycbd;
         __u8 aes_kw;
         __u8 dea_kw;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4527ac7b5961..efda0615741f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2559,12 +2559,26 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
                 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+/*
+ * kvm_arch_crypto_set_masks
+ *
+ * @kvm: pointer to the target guest's KVM struct containing the crypto masks
+ *       to be set.
+ * @apm: the mask identifying the accessible AP adapters
+ * @aqm: the mask identifying the accessible AP domains
+ * @adm: the mask identifying the accessible AP control domains
+ *
+ * Set the masks that identify the adapters, domains and control domains to
+ * which the KVM guest is granted access.
+ *
+ * Note: The kvm->lock mutex must be locked by the caller before invoking this
+ *       function.
+ */
 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
                                unsigned long *aqm, unsigned long *adm)
 {
         struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
 
-        mutex_lock(&kvm->lock);
         kvm_s390_vcpu_block_all(kvm);
 
         switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
@@ -2595,13 +2609,23 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
         /* recreate the shadow crycb for each vcpu */
         kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
         kvm_s390_vcpu_unblock_all(kvm);
-        mutex_unlock(&kvm->lock);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
 
+/*
+ * kvm_arch_crypto_clear_masks
+ *
+ * @kvm: pointer to the target guest's KVM struct containing the crypto masks
+ *       to be cleared.
+ *
+ * Clear the masks that identify the adapters, domains and control domains to
+ * which the KVM guest is granted access.
+ *
+ * Note: The kvm->lock mutex must be locked by the caller before invoking this
+ *       function.
+ */
 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
 {
-        mutex_lock(&kvm->lock);
         kvm_s390_vcpu_block_all(kvm);
 
         memset(&kvm->arch.crypto.crycb->apcb0, 0,
@@ -2613,7 +2637,6 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm)
         /* recreate the shadow crycb for each vcpu */
         kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
         kvm_s390_vcpu_unblock_all(kvm);
-        mutex_unlock(&kvm->lock);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
 
@@ -2630,6 +2653,7 @@ static void kvm_s390_crypto_init(struct kvm *kvm)
 {
         kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
         kvm_s390_set_crycb_format(kvm);
+        init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
 
         if (!test_kvm_facility(kvm, 76))
                 return;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9928f785c677..53da4ceb16a3 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -610,6 +610,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 static int handle_pqap(struct kvm_vcpu *vcpu)
 {
         struct ap_queue_status status = {};
+        crypto_hook pqap_hook;
         unsigned long reg0;
         int ret;
         uint8_t fc;
@@ -654,18 +655,20 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
         /*
-         * Verify that the hook callback is registered, lock the owner
-         * and call the hook.
+         * If the hook callback is registered, there will be a pointer to the
+         * hook function pointer in the kvm_s390_crypto structure. Lock the
+         * owner, retrieve the hook function pointer and call the hook.
          */
+        down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
         if (vcpu->kvm->arch.crypto.pqap_hook) {
-                if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
-                        return -EOPNOTSUPP;
-                ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
-                module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
+                pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
+                ret = pqap_hook(vcpu);
                 if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
                         kvm_s390_set_psw_cc(vcpu, 3);
+                up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
                 return ret;
         }
+        up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
         /*
          * A vfio_driver must register a hook.
          * No hook means no driver to enable the SIE CRYCB and no queues.
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3a72352aa5cf..123c590ebe1d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -136,7 +136,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
                                                     struct pci_dev *dev)
 {
         struct pci_dynid *dynid;
-        const struct pci_device_id *found_id = NULL;
+        const struct pci_device_id *found_id = NULL, *ids;
 
         /* When driver_override is set, only bind to the matching driver */
         if (dev->driver_override && strcmp(dev->driver_override, drv->name))
@@ -152,14 +152,28 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
         }
         spin_unlock(&drv->dynids.lock);
 
-        if (!found_id)
-                found_id = pci_match_id(drv->id_table, dev);
+        if (found_id)
+                return found_id;
 
-        /* driver_override will always match, send a dummy id */
-        if (!found_id && dev->driver_override)
-                found_id = &pci_device_id_any;
+        for (ids = drv->id_table; (found_id = pci_match_id(ids, dev));
+             ids = found_id + 1) {
+                /*
+                 * The match table is split based on driver_override.
+                 * In case override_only was set, enforce driver_override
+                 * matching.
+                 */
+                if (found_id->override_only) {
+                        if (dev->driver_override)
+                                return found_id;
+                } else {
+                        return found_id;
+                }
+        }
 
-        return found_id;
+        /* driver_override will always match, send a dummy id */
+        if (dev->driver_override)
+                return &pci_device_id_any;
+        return NULL;
 }
 
 /**
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index cee5626fe0a4..2347808fa3e4 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,8 +24,9 @@
 #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
 #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
 
-static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
 static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
 
 static int match_apqn(struct device *dev, const void *data)
 {
@@ -294,15 +295,6 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
         matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
                                    struct ap_matrix_mdev, pqap_hook);
 
-        /*
-         * If the KVM pointer is in the process of being set, wait until the
-         * process has completed.
-         */
-        wait_event_cmd(matrix_mdev->wait_for_kvm,
-                       !matrix_mdev->kvm_busy,
-                       mutex_unlock(&matrix_dev->lock),
-                       mutex_lock(&matrix_dev->lock));
-
         /* If the there is no guest using the mdev, there is nothing to do */
         if (!matrix_mdev->kvm)
                 goto out_unlock;
@@ -335,45 +327,57 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
         matrix->adm_max = info->apxa ? info->Nd : 15;
 }
 
-static int vfio_ap_mdev_create(struct mdev_device *mdev)
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
 {
         struct ap_matrix_mdev *matrix_mdev;
+        int ret;
 
         if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
                 return -EPERM;
 
         matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
         if (!matrix_mdev) {
-                atomic_inc(&matrix_dev->available_instances);
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto err_dec_available;
         }
+        vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
+                            &vfio_ap_matrix_dev_ops);
 
         matrix_mdev->mdev = mdev;
         vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
-        init_waitqueue_head(&matrix_mdev->wait_for_kvm);
-        mdev_set_drvdata(mdev, matrix_mdev);
-        matrix_mdev->pqap_hook.hook = handle_pqap;
-        matrix_mdev->pqap_hook.owner = THIS_MODULE;
+        matrix_mdev->pqap_hook = handle_pqap;
         mutex_lock(&matrix_dev->lock);
         list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
         mutex_unlock(&matrix_dev->lock);
 
+        ret = vfio_register_group_dev(&matrix_mdev->vdev);
+        if (ret)
+                goto err_list;
+        dev_set_drvdata(&mdev->dev, matrix_mdev);
         return 0;
+
+err_list:
+        mutex_lock(&matrix_dev->lock);
+        list_del(&matrix_mdev->node);
+        mutex_unlock(&matrix_dev->lock);
+        kfree(matrix_mdev);
+err_dec_available:
+        atomic_inc(&matrix_dev->available_instances);
+        return ret;
 }
 
-static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+static void vfio_ap_mdev_remove(struct mdev_device *mdev)
 {
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
+
+        vfio_unregister_group_dev(&matrix_mdev->vdev);
 
         mutex_lock(&matrix_dev->lock);
-        vfio_ap_mdev_reset_queues(mdev);
+        vfio_ap_mdev_reset_queues(matrix_mdev);
         list_del(&matrix_mdev->node);
         kfree(matrix_mdev);
-        mdev_set_drvdata(mdev, NULL);
         atomic_inc(&matrix_dev->available_instances);
         mutex_unlock(&matrix_dev->lock);
-
-        return 0;
 }
 
 static ssize_t name_show(struct mdev_type *mtype,
@@ -615,16 +619,12 @@ static ssize_t assign_adapter_store(struct device *dev,
 {
         int ret;
         unsigned long apid;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * un-assignment of adapter
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If the KVM guest is running, disallow assignment of adapter */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -688,16 +688,12 @@ static ssize_t unassign_adapter_store(struct device *dev,
 {
         int ret;
         unsigned long apid;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * un-assignment of adapter
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If the KVM guest is running, disallow unassignment of adapter */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -777,17 +773,13 @@ static ssize_t assign_domain_store(struct device *dev,
 {
         int ret;
         unsigned long apqi;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
         unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * assignment of domain
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If the KVM guest is running, disallow assignment of domain */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -846,16 +838,12 @@ static ssize_t unassign_domain_store(struct device *dev,
 {
         int ret;
         unsigned long apqi;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * un-assignment of domain
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If the KVM guest is running, disallow unassignment of domain */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -900,16 +888,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
 {
         int ret;
         unsigned long id;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * assignment of control domain.
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If the KVM guest is running, disallow assignment of control domain */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -958,17 +942,13 @@ static ssize_t unassign_control_domain_store(struct device *dev,
 {
         int ret;
         unsigned long domid;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
         unsigned long max_domid = matrix_mdev->matrix.adm_max;
 
         mutex_lock(&matrix_dev->lock);
 
-        /*
-         * If the KVM pointer is in flux or the guest is running, disallow
-         * un-assignment of control domain.
-         */
-        if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+        /* If a KVM guest is running, disallow unassignment of control domain */
+        if (matrix_mdev->kvm) {
                 ret = -EBUSY;
                 goto done;
         }
@@ -997,8 +977,7 @@ static ssize_t control_domains_show(struct device *dev,
         int nchars = 0;
         int n;
         char *bufpos = buf;
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
         unsigned long max_domid = matrix_mdev->matrix.adm_max;
 
         mutex_lock(&matrix_dev->lock);
@@ -1016,8 +995,7 @@ static DEVICE_ATTR_RO(control_domains);
 static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
-        struct mdev_device *mdev = mdev_from_dev(dev);
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
         char *bufpos = buf;
         unsigned long apid;
         unsigned long apqi;
@@ -1109,23 +1087,30 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
         struct ap_matrix_mdev *m;
 
         if (kvm->arch.crypto.crycbd) {
+                down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+                kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+                up_write(&kvm->arch.crypto.pqap_hook_rwsem);
+
+                mutex_lock(&kvm->lock);
+                mutex_lock(&matrix_dev->lock);
+
                 list_for_each_entry(m, &matrix_dev->mdev_list, node) {
-                        if (m != matrix_mdev && m->kvm == kvm)
+                        if (m != matrix_mdev && m->kvm == kvm) {
+                                mutex_unlock(&kvm->lock);
+                                mutex_unlock(&matrix_dev->lock);
                                 return -EPERM;
+                        }
                 }
 
                 kvm_get_kvm(kvm);
-                matrix_mdev->kvm_busy = true;
-                mutex_unlock(&matrix_dev->lock);
+                matrix_mdev->kvm = kvm;
                 kvm_arch_crypto_set_masks(kvm,
                                           matrix_mdev->matrix.apm,
                                           matrix_mdev->matrix.aqm,
                                           matrix_mdev->matrix.adm);
-                mutex_lock(&matrix_dev->lock);
-                kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
-                matrix_mdev->kvm = kvm;
-                matrix_mdev->kvm_busy = false;
-                wake_up_all(&matrix_mdev->wait_for_kvm);
+
+                mutex_unlock(&kvm->lock);
+                mutex_unlock(&matrix_dev->lock);
         }
 
         return 0;
@@ -1175,28 +1160,24 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
  * done under the @matrix_mdev->lock.
  *
  */
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
+                                   struct kvm *kvm)
 {
-        /*
-         * If the KVM pointer is in the process of being set, wait until the
-         * process has completed.
-         */
-        wait_event_cmd(matrix_mdev->wait_for_kvm,
-                       !matrix_mdev->kvm_busy,
-                       mutex_unlock(&matrix_dev->lock),
-                       mutex_lock(&matrix_dev->lock));
+        if (kvm && kvm->arch.crypto.crycbd) {
+                down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+                kvm->arch.crypto.pqap_hook = NULL;
+                up_write(&kvm->arch.crypto.pqap_hook_rwsem);
 
-        if (matrix_mdev->kvm) {
-                matrix_mdev->kvm_busy = true;
-                mutex_unlock(&matrix_dev->lock);
-                kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+                mutex_lock(&kvm->lock);
                 mutex_lock(&matrix_dev->lock);
-                vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
-                matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
-                kvm_put_kvm(matrix_mdev->kvm);
+
+                kvm_arch_crypto_clear_masks(kvm);
+                vfio_ap_mdev_reset_queues(matrix_mdev);
+                kvm_put_kvm(kvm);
                 matrix_mdev->kvm = NULL;
-                matrix_mdev->kvm_busy = false;
-                wake_up_all(&matrix_mdev->wait_for_kvm);
+
+                mutex_unlock(&kvm->lock);
+                mutex_unlock(&matrix_dev->lock);
         }
 }
@@ -1209,16 +1190,13 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
         if (action != VFIO_GROUP_NOTIFY_SET_KVM)
                 return NOTIFY_OK;
 
-        mutex_lock(&matrix_dev->lock);
         matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
 
         if (!data)
-                vfio_ap_mdev_unset_kvm(matrix_mdev);
+                vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
         else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                 notify_rc = NOTIFY_DONE;
 
-        mutex_unlock(&matrix_dev->lock);
-
         return notify_rc;
 }
 
@@ -1288,13 +1266,12 @@ free_resources:
         return ret;
 }
 
-static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
 {
         int ret;
         int rc = 0;
         unsigned long apid, apqi;
         struct vfio_ap_queue *q;
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
 
         for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
                              matrix_mdev->matrix.apm_max + 1) {
@@ -1315,52 +1292,45 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
         return rc;
 }
 
-static int vfio_ap_mdev_open_device(struct mdev_device *mdev)
+static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
 {
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+        struct ap_matrix_mdev *matrix_mdev =
+                container_of(vdev, struct ap_matrix_mdev, vdev);
         unsigned long events;
         int ret;
 
-
-        if (!try_module_get(THIS_MODULE))
-                return -ENODEV;
-
         matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
         events = VFIO_GROUP_NOTIFY_SET_KVM;
 
-        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+        ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                      &events, &matrix_mdev->group_notifier);
-        if (ret) {
-                module_put(THIS_MODULE);
+        if (ret)
                 return ret;
-        }
 
         matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
         events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+        ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
                                      &events, &matrix_mdev->iommu_notifier);
-        if (!ret)
-                return ret;
+        if (ret)
+                goto out_unregister_group;
+        return 0;
 
-        vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+out_unregister_group:
+        vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                  &matrix_mdev->group_notifier);
-        module_put(THIS_MODULE);
         return ret;
 }
 
-static void vfio_ap_mdev_close_device(struct mdev_device *mdev)
+static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
 {
-        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
-
-        mutex_lock(&matrix_dev->lock);
-        vfio_ap_mdev_unset_kvm(matrix_mdev);
-        mutex_unlock(&matrix_dev->lock);
+        struct ap_matrix_mdev *matrix_mdev =
+                container_of(vdev, struct ap_matrix_mdev, vdev);
 
-        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+        vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
                                  &matrix_mdev->iommu_notifier);
-        vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+        vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                  &matrix_mdev->group_notifier);
-        module_put(THIS_MODULE);
+        vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
 }
 
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -1383,11 +1353,12 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
         return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 }
 
-static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
                                     unsigned int cmd, unsigned long arg)
 {
+        struct ap_matrix_mdev *matrix_mdev =
+                container_of(vdev, struct ap_matrix_mdev, vdev);
         int ret;
-        struct ap_matrix_mdev *matrix_mdev;
 
         mutex_lock(&matrix_dev->lock);
         switch (cmd) {
@@ -1395,22 +1366,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
                 ret = vfio_ap_mdev_get_device_info(arg);
                 break;
         case VFIO_DEVICE_RESET:
-                matrix_mdev = mdev_get_drvdata(mdev);
-                if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
-                        ret = -EINVAL;
-                        break;
-                }
-
-                /*
-                 * If the KVM pointer is in the process of being set, wait until
-                 * the process has completed.
-                 */
-                wait_event_cmd(matrix_mdev->wait_for_kvm,
-                               !matrix_mdev->kvm_busy,
-                               mutex_unlock(&matrix_dev->lock),
-                               mutex_lock(&matrix_dev->lock));
-
-                ret = vfio_ap_mdev_reset_queues(mdev);
+                ret = vfio_ap_mdev_reset_queues(matrix_mdev);
                 break;
         default:
                 ret = -EOPNOTSUPP;
@@ -1421,25 +1377,51 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
         return ret;
 }
 
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+        .open_device = vfio_ap_mdev_open_device,
+        .close_device = vfio_ap_mdev_close_device,
+        .ioctl = vfio_ap_mdev_ioctl,
+};
+
+static struct mdev_driver vfio_ap_matrix_driver = {
+        .driver = {
+                .name = "vfio_ap_mdev",
+                .owner = THIS_MODULE,
+                .mod_name = KBUILD_MODNAME,
+                .dev_groups = vfio_ap_mdev_attr_groups,
+        },
+        .probe = vfio_ap_mdev_probe,
+        .remove = vfio_ap_mdev_remove,
+};
+
 static const struct mdev_parent_ops vfio_ap_matrix_ops = {
         .owner = THIS_MODULE,
+        .device_driver = &vfio_ap_matrix_driver,
         .supported_type_groups = vfio_ap_mdev_type_groups,
-        .mdev_attr_groups = vfio_ap_mdev_attr_groups,
-        .create = vfio_ap_mdev_create,
-        .remove = vfio_ap_mdev_remove,
-        .open_device = vfio_ap_mdev_open_device,
-        .close_device = vfio_ap_mdev_close_device,
-        .ioctl = vfio_ap_mdev_ioctl,
 };
 
 int vfio_ap_mdev_register(void)
 {
+        int ret;
+
         atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
 
-        return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+        ret = mdev_register_driver(&vfio_ap_matrix_driver);
+        if (ret)
+                return ret;
+
+        ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+        if (ret)
+                goto err_driver;
+        return 0;
+
+err_driver:
+        mdev_unregister_driver(&vfio_ap_matrix_driver);
+        return ret;
 }
 
 void vfio_ap_mdev_unregister(void)
 {
         mdev_unregister_device(&matrix_dev->device);
+        mdev_unregister_driver(&vfio_ap_matrix_driver);
 }
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index f82a6396acae..77760e2b546f 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/kvm_host.h>
+#include <linux/vfio.h>
#include "ap_bus.h" @@ -79,14 +80,13 @@ struct ap_matrix { * @kvm: the struct holding guest's state */ struct ap_matrix_mdev { + struct vfio_device vdev; struct list_head node; struct ap_matrix matrix; struct notifier_block group_notifier; struct notifier_block iommu_notifier; - bool kvm_busy; - wait_queue_head_t wait_for_kvm; struct kvm *kvm; - struct kvm_s390_module_hook pqap_hook; + crypto_hook pqap_hook; struct mdev_device *mdev; }; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index e44bf736e2b2..6130d00252ed 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -1,12 +1,22 @@ # SPDX-License-Identifier: GPL-2.0-only +menuconfig VFIO + tristate "VFIO Non-Privileged userspace driver framework" + select IOMMU_API + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + help + VFIO provides a framework for secure userspace device drivers. + See Documentation/driver-api/vfio.rst for more details. + + If you don't know what to do here, say N. + +if VFIO config VFIO_IOMMU_TYPE1 tristate - depends on VFIO default n config VFIO_IOMMU_SPAPR_TCE tristate - depends on VFIO && SPAPR_TCE_IOMMU + depends on SPAPR_TCE_IOMMU default VFIO config VFIO_SPAPR_EEH @@ -16,22 +26,11 @@ config VFIO_SPAPR_EEH config VFIO_VIRQFD tristate - depends on VFIO && EVENTFD + select EVENTFD default n -menuconfig VFIO - tristate "VFIO Non-Privileged userspace driver framework" - select IOMMU_API - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) - help - VFIO provides a framework for secure userspace device drivers. - See Documentation/driver-api/vfio.rst for more details. - - If you don't know what to do here, say N. - config VFIO_NOIOMMU bool "VFIO No-IOMMU support" - depends on VFIO help VFIO is built on the ability to isolate devices using the IOMMU. Only with an IOMMU can userspace access to DMA capable devices be @@ -48,4 +47,6 @@ source "drivers/vfio/pci/Kconfig" source "drivers/vfio/platform/Kconfig" source "drivers/vfio/mdev/Kconfig" source "drivers/vfio/fsl-mc/Kconfig" +endif + source "virt/lib/Kconfig" diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig index b1a527d6b6f2..597d338c5c8a 100644 --- a/drivers/vfio/fsl-mc/Kconfig +++ b/drivers/vfio/fsl-mc/Kconfig @@ -1,6 +1,7 @@ config VFIO_FSL_MC tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices" - depends on VFIO && FSL_MC_BUS && EVENTFD + depends on FSL_MC_BUS + select EVENTFD help Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc (Management Complex) devices. This is required to passthrough diff --git a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig index 763c877a1318..646dbed44eb2 100644 --- a/drivers/vfio/mdev/Kconfig +++ b/drivers/vfio/mdev/Kconfig @@ -2,7 +2,6 @@ config VFIO_MDEV tristate "Mediated device driver framework" - depends |
