Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--	drivers/iommu/iommu.c	107
1 file changed, 52 insertions, 55 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6c02f93422ce..a4b606c591da 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu,
 		err = bus_iommu_probe(iommu_buses[i]);
 	if (err)
 		iommu_device_unregister(iommu);
+	else
+		WRITE_ONCE(iommu->ready, true);
 	return err;
 }
 EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev)
 	 * is buried in the bus dma_configure path. Properly unpicking that is
 	 * still a big job, so for now just invoke the whole thing. The device
 	 * already having a driver bound means dma_configure has already run and
-	 * either found no IOMMU to wait for, or we're in its replay call right
-	 * now, so either way there's no point calling it again.
+	 * found no IOMMU to wait for, so there's no point calling it again.
 	 */
-	if (!dev->driver && dev->bus->dma_configure) {
+	if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
 		mutex_unlock(&iommu_probe_device_lock);
 		dev->bus->dma_configure(dev);
 		mutex_lock(&iommu_probe_device_lock);
+		/* If another instance finished the job for us, skip it */
+		if (!dev->iommu || dev->iommu_group)
+			return -ENODEV;
 	}
 	/*
 	 * At this point, relevant devices either now have a fwspec which will
@@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
 	if (ops->identity_domain)
 		return ops->identity_domain;
 
-	/* Older drivers create the identity domain via ops->domain_alloc() */
-	if (!ops->domain_alloc)
+	if (ops->domain_alloc_identity) {
+		domain = ops->domain_alloc_identity(dev);
+		if (IS_ERR(domain))
+			return domain;
+	} else {
 		return ERR_PTR(-EOPNOTSUPP);
-
-	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
-	if (IS_ERR(domain))
-		return domain;
-	if (!domain)
-		return ERR_PTR(-ENOMEM);
+	}
 
 	iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
 	return domain;
@@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
 		domain = ops->domain_alloc_paging(dev);
 	else if (ops->domain_alloc_paging_flags)
 		domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
+#if IS_ENABLED(CONFIG_FSL_PAMU)
 	else if (ops->domain_alloc && !flags)
 		domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+#endif
 	else
 		return ERR_PTR(-EOPNOTSUPP);
 
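The @@ -1629 hunk above drops the legacy ops->domain_alloc(IOMMU_DOMAIN_IDENTITY) fallback from the identity path: identity domains now come either from a static ops->identity_domain (checked first) or from the new domain_alloc_identity() callback, which returns ERR_PTR() values and never NULL, so the old !domain check disappears. The one remaining ops->domain_alloc() call at @@ -2025 is fenced off for the single driver (fsl_pamu) that still needs it. A minimal sketch of the driver side under the new contract (the mydrv_* names are hypothetical, not part of this patch):

struct mydrv_domain {
	struct iommu_domain domain;
	/* hypothetical hardware state for the 1:1 mapping would live here */
};

static struct iommu_domain *mydrv_domain_alloc_identity(struct device *dev)
{
	struct mydrv_domain *md;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return ERR_PTR(-ENOMEM);	/* error pointer, never NULL */
	/* no need to set domain.type: the core calls iommu_domain_init() */
	return &md->domain;
}

static const struct iommu_ops mydrv_ops = {
	.domain_alloc_identity = mydrv_domain_alloc_identity,
	/* a driver with one singleton domain sets .identity_domain instead */
};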
@@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
 	return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
 }
 
+static bool domain_iommu_ops_compatible(const struct iommu_ops *ops,
+					struct iommu_domain *domain)
+{
+	if (domain->owner == ops)
+		return true;
+
+	/* For static domains, owner isn't set. */
+	if (domain == ops->blocked_domain || domain == ops->identity_domain)
+		return true;
+
+	return false;
+}
+
 static int __iommu_attach_group(struct iommu_domain *domain,
 				struct iommu_group *group)
 {
@@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
 		return -EBUSY;
 
 	dev = iommu_group_first_dev(group);
-	if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+	if (!dev_has_iommu(dev) ||
+	    !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain))
 		return -EINVAL;
 
 	return __iommu_group_set_domain(group, domain);
@@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
 	unsigned int pgsize_idx, pgsize_idx_next;
 	unsigned long pgsizes;
 	size_t offset, pgsize, pgsize_next;
+	size_t offset_end;
 	unsigned long addr_merge = paddr | iova;
 
 	/* Page sizes supported by the hardware and small enough for @size */
@@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
 	 * If size is big enough to accommodate the larger page, reduce
 	 * the number of smaller pages.
 	 */
-	if (offset + pgsize_next <= size)
+	if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
+	    offset_end <= size)
 		size = offset;
 
 out_set_count:
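The @@ -2435 hunk replaces the raw offset + pgsize_next <= size test with check_add_overflow() from linux/overflow.h: both operands are size_t, so if the sum wraps, the old comparison could spuriously pass and pick the wrong page count. check_add_overflow() is implemented via the compiler's __builtin_add_overflow(), so the guarded form costs only a flag test. A standalone illustration of the failure mode, compilable in userspace with gcc or clang (the values are contrived for demonstration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t offset = SIZE_MAX - 4095;	/* pathologically large offset */
	size_t pgsize_next = 8192;
	size_t size = 4096;
	size_t offset_end;

	/* Unchecked: the sum wraps around to 4096, so the test wrongly passes */
	printf("unchecked: %d\n", offset + pgsize_next <= size);	/* 1 */

	/* Checked, mirroring the patched iommu_pgsize() logic */
	bool wrapped = __builtin_add_overflow(offset, pgsize_next, &offset_end);
	printf("checked:   %d\n", !wrapped && offset_end <= size);	/* 0 */
	return 0;
}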
- */ -int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) -{ - if (dev_has_iommu(dev)) { - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (ops->dev_disable_feat) - return ops->dev_disable_feat(dev, feat); - } - - return -EBUSY; -} -EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); - /** * iommu_setup_default_domain - Set the default_domain for the group * @group: Group to change @@ -3454,7 +3450,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, !ops->blocked_domain->ops->set_dev_pasid) return -EOPNOTSUPP; - if (ops != domain->owner || pasid == IOMMU_NO_PASID) + if (!domain_iommu_ops_compatible(ops, domain) || + pasid == IOMMU_NO_PASID) return -EINVAL; mutex_lock(&group->mutex); @@ -3536,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain, if (!domain->ops->set_dev_pasid) return -EOPNOTSUPP; - if (dev_iommu_ops(dev) != domain->owner || + if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) || pasid == IOMMU_NO_PASID || !handle) return -EINVAL; |