author	Yi Liu <yi.l.liu@intel.com>	2025-02-25 17:18:49 -0800
committer	Jason Gunthorpe <jgg@nvidia.com>	2025-02-28 10:07:14 -0400
commit	5e9f822c9c683ae884fa5e71df41d1647b2876c6 (patch)
tree	0670ee78533f9fefa08118fafdff3858a29609c1
parent	e1ea9d30d84c65e96eba27b240be5b6798350490 (diff)
iommu: Swap the order of setting group->pasid_array and calling attach op of iommu drivers
The current implementation stores the entry in group->pasid_array before
the underlying iommu driver has successfully set the new domain. This can
lead to a situation where PRIs are received on the new domain before the
attach operation has completed.

Swap the order of operations so that the domain is set in the underlying
iommu driver before group->pasid_array is updated.

Link: https://patch.msgid.link/r/20250226011849.5102-5-yi.l.liu@intel.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
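As a rough sketch of the reserve-then-publish flow the patch moves to (illustrative only, not the patched iommu.c code; my_pasid_array, my_driver_attach() and my_attach_pasid() are hypothetical placeholders), the ordering with the xarray API looks roughly like this:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_pasid_array);			/* hypothetical array */
	static int my_driver_attach(unsigned long pasid);	/* hypothetical driver hook */

	static int my_attach_pasid(void *entry, unsigned long pasid)
	{
		int ret;

		/* Reserve the slot; an already-present entry is a failure. */
		ret = xa_insert(&my_pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
		if (ret)
			return ret;

		/* Let the driver switch domains before anything is published. */
		ret = my_driver_attach(pasid);
		if (ret) {
			/* Drop the reservation; the slot never became visible. */
			xa_release(&my_pasid_array, pasid);
			return ret;
		}

		/*
		 * Publish the real entry. The node was already allocated by the
		 * earlier xa_insert(), so this store does not need new memory.
		 */
		return xa_err(xa_store(&my_pasid_array, pasid, entry, GFP_KERNEL));
	}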
-rw-r--r--	drivers/iommu/iommu.c	48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d3652518c734..0ee17893810f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3388,13 +3388,29 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
 	entry = iommu_make_pasid_array_entry(domain, handle);
-	ret = xa_insert(&group->pasid_array, pasid, entry, GFP_KERNEL);
+	/*
+	 * Entry present is a failure case. Use xa_insert() instead of
+	 * xa_reserve().
+	 */
+	ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
 	if (ret)
 		goto out_unlock;
 
 	ret = __iommu_set_group_pasid(domain, group, pasid);
-	if (ret)
-		xa_erase(&group->pasid_array, pasid);
+	if (ret) {
+		xa_release(&group->pasid_array, pasid);
+		goto out_unlock;
+	}
+
+	/*
+	 * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, this cannot fail. The new domain cannot be visible until the
+	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
+	 * queued and then failing attach.
+	 */
+	WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+				   pasid, entry, GFP_KERNEL)));
+
 out_unlock:
 	mutex_unlock(&group->mutex);
 	return ret;
@@ -3509,19 +3525,27 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
 	mutex_lock(&group->mutex);
 	entry = iommu_make_pasid_array_entry(domain, handle);
-	ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
+	ret = xa_insert(&group->pasid_array,
+			IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
 	if (ret)
-		goto err_unlock;
+		goto out_unlock;
 
 	ret = __iommu_attach_group(domain, group);
-	if (ret)
-		goto err_erase;
-	mutex_unlock(&group->mutex);
+	if (ret) {
+		xa_release(&group->pasid_array, IOMMU_NO_PASID);
+		goto out_unlock;
+	}
 
-	return 0;
-err_erase:
-	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+	/*
+	 * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, this cannot fail. The new domain cannot be visible until the
+	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
+	 * queued and then failing attach.
+	 */
+	WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+				   IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
 	mutex_unlock(&group->mutex);
 	return ret;
 }
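For context, a hedged sketch of the lookup side (hypothetical helper, not part of this patch): a slot holding only the reserved XA_ZERO_ENTRY reads back as NULL through xa_load(), exactly like an empty slot, so nothing can resolve the PASID to the new domain until the final xa_store() above publishes the real entry.

	#include <linux/xarray.h>

	/* Hypothetical lookup helper, not part of this patch. */
	static void *my_find_pasid_entry(struct xarray *pasid_array,
					 unsigned long pasid)
	{
		/*
		 * A reserved slot reads back as NULL, the same as an empty one,
		 * so callers see nothing until the real entry has been stored.
		 */
		return xa_load(pasid_array, pasid);
	}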