| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-18 10:55:13 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-18 10:55:13 -0700 |
| commit | 0cc6f45cecb46cefe89c17ec816dc8cd58a2229a (patch) | |
| tree | 8996380ed4473b25607175aafa79756a74c2acf5 /drivers/iommu/intel/nested.c | |
| parent | f0cd69b8cca6a5096463644d6dacc9f991bfa521 (diff) | |
| parent | 2bd5059c6cc04b02073d4d9f57137ab74e1d8e7a (diff) | |
Merge tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
"Core:
- IOMMU memory usage observability - This will make the memory used
for IO page tables explicitly visible.
- Simplify arch_setup_dma_ops()
Intel VT-d:
- Consolidate domain cache invalidation
- Remove private data from page fault message
- Allocate DMAR fault interrupts locally
- Cleanup and refactoring
ARM-SMMUv2:
- Support for fault debugging hardware on Qualcomm implementations
- Re-land support for the ->domain_alloc_paging() callback
ARM-SMMUv3:
- Improve handling of MSI allocation failure
- Drop support for the "disable_bypass" cmdline option
- Major rework of the CD creation code, following on directly from
the STE rework merged last time around.
- Add unit tests for the new STE/CD manipulation logic
AMD-Vi:
- Final part of SVA changes with generic IO page fault handling
Renesas IPMMU:
- Add support for R8A779H0 hardware
... and a couple smaller fixes and updates across the sub-tree"
* tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (80 commits)
iommu/arm-smmu-v3: Make the kunit into a module
arm64: Properly clean up iommu-dma remnants
iommu/amd: Enable Guest Translation after reading IOMMU feature register
iommu/vt-d: Decouple igfx_off from graphic identity mapping
iommu/amd: Fix compilation error
iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry
iommu/arm-smmu-v3: Build the whole CD in arm_smmu_make_s1_cd()
iommu/arm-smmu-v3: Move the CD generation for SVA into a function
iommu/arm-smmu-v3: Allocate the CD table entry in advance
iommu/arm-smmu-v3: Make arm_smmu_alloc_cd_ptr()
iommu/arm-smmu-v3: Consolidate clearing a CD table entry
iommu/arm-smmu-v3: Move the CD generation for S1 domains into a function
iommu/arm-smmu-v3: Make CD programming use arm_smmu_write_entry()
iommu/arm-smmu-v3: Add an ops indirection to the STE code
iommu/arm-smmu-qcom: Don't build debug features as a kernel module
iommu/amd: Add SVA domain support
iommu: Add ops->domain_alloc_sva()
iommu/amd: Initial SVA support for AMD IOMMU
iommu/amd: Add support for enable/disable IOPF
iommu/amd: Add IO page fault notifier handler
...
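The SVA entries in the shortlog above ("iommu: Add ops->domain_alloc_sva()", "iommu/amd: Add SVA domain support") introduce a driver callback for allocating SVA domains. As a rough illustration only, a driver-side hookup might look like the sketch below; the my_driver_* names are hypothetical, and the exact callback signature and error conventions should be taken from include/linux/iommu.h in this release rather than from this sketch.

```c
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/mm_types.h>
#include <linux/slab.h>

/* Hypothetical driver-private wrapper around the core iommu_domain. */
struct my_sva_domain {
	struct iommu_domain domain;
	struct mm_struct *mm;
};

/*
 * Assumed shape of the new op: allocate an SVA domain that will share
 * the CPU page table of @mm with DMA issued by @dev.
 */
static struct iommu_domain *my_driver_domain_alloc_sva(struct device *dev,
							struct mm_struct *mm)
{
	struct my_sva_domain *sva;

	sva = kzalloc(sizeof(*sva), GFP_KERNEL);
	if (!sva)
		return ERR_PTR(-ENOMEM);

	sva->mm = mm;
	/* A real driver would set domain ops and program hardware here. */
	return &sva->domain;
}

static const struct iommu_ops my_driver_iommu_ops = {
	/* ... other callbacks elided ... */
	.domain_alloc_sva	= my_driver_domain_alloc_sva,
};
```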
Diffstat (limited to 'drivers/iommu/intel/nested.c')
-rw-r--r-- | drivers/iommu/intel/nested.c | 69 |
1 file changed, 17 insertions, 52 deletions
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index a7d68f3d518a..16a2bcf5cfeb 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -52,13 +52,14 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
 		return ret;
 	}
 
+	ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
+	if (ret)
+		goto detach_iommu;
+
 	ret = intel_pasid_setup_nested(iommu, dev, IOMMU_NO_PASID,
 				       dmar_domain);
-	if (ret) {
-		domain_detach_iommu(dmar_domain, iommu);
-		dev_err_ratelimited(dev, "Failed to setup pasid entry\n");
-		return ret;
-	}
+	if (ret)
+		goto unassign_tag;
 
 	info->domain = dmar_domain;
 	spin_lock_irqsave(&dmar_domain->lock, flags);
@@ -68,6 +69,12 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
 	domain_update_iotlb(dmar_domain);
 
 	return 0;
+unassign_tag:
+	cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
+detach_iommu:
+	domain_detach_iommu(dmar_domain, iommu);
+
+	return ret;
 }
 
 static void intel_nested_domain_free(struct iommu_domain *domain)
@@ -81,50 +88,6 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
 	kfree(dmar_domain);
 }
 
-static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
-				   unsigned int mask)
-{
-	struct device_domain_info *info;
-	unsigned long flags;
-	u16 sid, qdep;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->ats_enabled)
-			continue;
-		sid = info->bus << 8 | info->devfn;
-		qdep = info->ats_qdep;
-		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-				   qdep, addr, mask);
-		quirk_extra_dev_tlb_flush(info, addr, mask,
-					  IOMMU_NO_PASID, qdep);
-	}
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
-				     u64 npages, bool ih)
-{
-	struct iommu_domain_info *info;
-	unsigned int mask;
-	unsigned long i;
-
-	xa_for_each(&domain->iommu_array, i, info)
-		qi_flush_piotlb(info->iommu,
-				domain_id_iommu(domain, info->iommu),
-				IOMMU_NO_PASID, addr, npages, ih);
-
-	if (!domain->has_iotlb_device)
-		return;
-
-	if (npages == U64_MAX)
-		mask = 64 - VTD_PAGE_SHIFT;
-	else
-		mask = ilog2(__roundup_pow_of_two(npages));
-
-	nested_flush_dev_iotlb(domain, addr, mask);
-}
-
 static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
 					      struct iommu_user_data_array *array)
 {
@@ -157,9 +120,9 @@ static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
 			break;
 		}
 
-		intel_nested_flush_cache(dmar_domain, inv_entry.addr,
-					 inv_entry.npages,
-					 inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+		cache_tag_flush_range(dmar_domain, inv_entry.addr,
+				      inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
+				      inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
 
 		processed++;
 	}
@@ -206,7 +169,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
 	domain->domain.type = IOMMU_DOMAIN_NESTED;
 	INIT_LIST_HEAD(&domain->devices);
 	INIT_LIST_HEAD(&domain->dev_pasids);
+	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->lock);
+	spin_lock_init(&domain->cache_lock);
 	xa_init(&domain->iommu_array);
 
 	spin_lock(&s2_domain->s1_lock);
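Two things stand out in the diff above: the attach path now unwinds with goto labels in reverse order of setup (the cache tag is unassigned before the domain is detached from the IOMMU), and the user cache-invalidation path converts a (start address, page count) request into the inclusive [start, end] range that cache_tag_flush_range() takes. Below is a minimal standalone sketch of that conversion, assuming nrpages_to_size() simply shifts the page count by the 4 KiB VT-d page shift; that assumption mirrors the obvious reading of the helper's name, and the real definition lives in the Intel driver's headers.

```c
#include <stdint.h>
#include <stdio.h>

/* VT-d base pages are 4 KiB; 12 matches the driver's VTD_PAGE_SHIFT. */
#define VTD_PAGE_SHIFT 12

/* Assumed equivalent of the driver's nrpages_to_size() helper. */
static uint64_t nrpages_to_size(uint64_t npages)
{
	return npages << VTD_PAGE_SHIFT;
}

int main(void)
{
	/* Example user invalidation request: start address plus page count. */
	uint64_t addr = 0x100000;
	uint64_t npages = 16;

	/* The patch passes an inclusive end address to cache_tag_flush_range(). */
	uint64_t start = addr;
	uint64_t end = addr + nrpages_to_size(npages) - 1;

	/* 16 pages of 4 KiB starting at 0x100000 -> [0x100000, 0x10ffff]. */
	printf("flush [%#llx, %#llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
```

The error-path rework follows the usual kernel convention of undoing acquisitions in the reverse order they were made: a failure in intel_pasid_setup_nested() first drops the newly assigned cache tag and only then detaches the domain from the IOMMU.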