author    Jason Gunthorpe <jgg@nvidia.com>     2025-10-23 15:22:31 -0300
committer Joerg Roedel <joerg.roedel@amd.com>  2025-11-05 09:47:44 +0100
commit    aefd967dab6469f5b827b59e50016a760dcc1fbc (patch)
tree      e12682e44ca5df99efac9ab107d8ed37ba8949e0 /include/linux
parent    bcc64b57b48e1c79fe6a53fec3427e14bc2054e7 (diff)
iommupt: Use the incoherent start/stop functions for PT_FEAT_DMA_INCOHERENT
This is the first step to supporting an incoherent walker: start and stop the
incoherence around the allocation and freeing of the page table memory. The
iommu_pages API maps this to dma_map/unmap_single() or arch cache flushing
calls.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
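[Editor's sketch, not part of the patch] The commit message says the "start" and
"stop" of incoherence are backed by dma_map/unmap_single() (or arch cache
flushes). The following hedged C sketch shows roughly what that bracketing looks
like on an incoherent platform; the helper names pt_incoherent_start() and
pt_incoherent_stop() are hypothetical, the real iommu_pages API wraps the
equivalent logic internally.

#include <linux/dma-mapping.h>

/* Make a freshly allocated page-table page visible to a non-coherent walker. */
static dma_addr_t pt_incoherent_start(struct device *iommu_device,
				      void *table, size_t size)
{
	/* dma_map_single() performs the cache flush on incoherent systems. */
	return dma_map_single(iommu_device, table, size, DMA_TO_DEVICE);
}

/* Undo the mapping before the page-table memory is freed. */
static void pt_incoherent_stop(struct device *iommu_device, dma_addr_t dma,
			       size_t size)
{
	dma_unmap_single(iommu_device, dma, size, DMA_TO_DEVICE);
}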
Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/generic_pt/common.h  6
 -rw-r--r--  include/linux/generic_pt/iommu.h   7
 2 files changed, 13 insertions, 0 deletions
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
index 96f8a6a7d60e..883069e32952 100644
--- a/include/linux/generic_pt/common.h
+++ b/include/linux/generic_pt/common.h
@@ -86,6 +86,12 @@ enum {
*/
enum pt_features {
/**
+ * @PT_FEAT_DMA_INCOHERENT: Cache flush page table memory before
+ * assuming the HW can read it. Otherwise a SMP release is sufficient
+ * for HW to read it.
+ */
+ PT_FEAT_DMA_INCOHERENT,
+ /**
* @PT_FEAT_FULL_VA: The table can span the full VA range from 0 to
* PT_VADDR_MAX.
*/
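[Editor's sketch, not part of the patch] The new PT_FEAT_DMA_INCOHERENT comment
distinguishes two ways of publishing page-table updates to the hardware: a cache
flush when the walker is incoherent, or only an SMP release otherwise. A minimal
illustration of that distinction follows; pt_table_publish() and the raw features
bitmap are hypothetical, while dma_sync_single_for_device() and smp_wmb() are the
standard kernel primitives each case maps to.

#include <linux/bitops.h>
#include <linux/dma-mapping.h>

static void pt_table_publish(unsigned long features, struct device *dev,
			     dma_addr_t table_dma, size_t size)
{
	if (test_bit(PT_FEAT_DMA_INCOHERENT, &features))
		/* Push the CPU's table writes out of the cache for the HW walker. */
		dma_sync_single_for_device(dev, table_dma, size, DMA_TO_DEVICE);
	else
		/* Coherent HW only needs the stores ordered before the table
		 * is linked into the tree. */
		smp_wmb();
}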
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index fde7ccf007c5..21132e342a79 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -57,6 +57,13 @@ struct pt_iommu {
* table walkers.
*/
int nid;
+
+ /**
+ * @iommu_device: Device pointer used for any DMA cache flushing when
+ * PT_FEAT_DMA_INCOHERENT. This is the iommu device that created the
+ * page table which must have dma ops that perform cache flushing.
+ */
+ struct device *iommu_device;
};
/**
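[Editor's sketch, not part of the patch] The new @iommu_device member holds the
IOMMU's own struct device, whose dma ops are used for the cache flushes when
PT_FEAT_DMA_INCOHERENT is set. A hedged sketch of how a driver might populate it
alongside the existing @nid member; my_driver_init_table() and its flow are
illustrative only.

#include <linux/device.h>
#include <linux/generic_pt/iommu.h>

static void my_driver_init_table(struct pt_iommu *iommu_table,
				 struct device *iommu_dev)
{
	/* NUMA node to allocate page-table memory from. */
	iommu_table->nid = dev_to_node(iommu_dev);

	/* The IOMMU device that created the page table; its dma ops must
	 * perform the cache flushing when PT_FEAT_DMA_INCOHERENT is set. */
	iommu_table->iommu_device = iommu_dev;
}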