Diffstat (limited to 'drivers/iommu/iommufd/selftest.c')
-rw-r--r--	drivers/iommu/iommufd/selftest.c	569
1 file changed, 314 insertions(+), 255 deletions(-)
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index de178827a078..c4322fd26f93 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -5,6 +5,8 @@
*/
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
@@ -12,6 +14,8 @@
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
+#include <linux/generic_pt/iommu.h>
+#include "../iommu-pages.h"
#include "../iommu-priv.h"
#include "io_pagetable.h"
@@ -41,21 +45,6 @@ static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
- MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
- MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
-
- /*
- * Like a real page table, alignment requires the low bits of the address
- * to be zero. xarray also requires the high bit to be zero, so we store
- * the pfns shifted. The upper bits are used for metadata.
- */
- MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
-
- _MOCK_PFN_START = MOCK_PFN_MASK + 1,
- MOCK_PFN_START_IOVA = _MOCK_PFN_START,
- MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
- MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
- MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
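For orientation, the deleted constants implemented a packed-value scheme: each xarray entry held a shifted pfn in its low bits, with per-entry metadata flags stored above MOCK_PFN_MASK. A minimal reconstruction of that encoding, using the removed constants (helper names are hypothetical, for illustration only):

	/* Hypothetical helpers reconstructing the removed encoding */
	static unsigned long mock_encode_ent(phys_addr_t paddr,
					     unsigned long flags)
	{
		/* flags is a mask of MOCK_PFN_{START,LAST,DIRTY,HUGE}_IOVA */
		return (paddr / MOCK_IO_PAGE_SIZE) | flags;
	}

	static phys_addr_t mock_decode_phys(unsigned long ent)
	{
		return (ent & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
	}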
@@ -124,10 +113,15 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
}
struct mock_iommu_domain {
+ union {
+ struct iommu_domain domain;
+ struct pt_iommu iommu;
+ struct pt_iommu_amdv1 amdv1;
+ };
unsigned long flags;
- struct iommu_domain domain;
- struct xarray pfns;
};
+PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdv1.iommu, domain);
static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain *domain)
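PT_IOMMU_CHECK_DOMAIN() is a compile-time guard that the embedded struct iommu_domain sits at the same offset through each union member, so to_mock_domain() stays a plain container_of(). Roughly what the assertion amounts to (a sketch, not the real macro body, which lives in linux/generic_pt/iommu.h; it assumes struct pt_iommu embeds a member named domain):

	/* Sketch of the presumed check */
	static_assert(offsetof(struct mock_iommu_domain, iommu.domain) ==
		      offsetof(struct mock_iommu_domain, domain));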
@@ -216,7 +210,7 @@ static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
}
static int mock_domain_nop_attach(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
struct mock_viommu *new_viommu = NULL;
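The signature change tracks the updated core callback that this series builds on, in which the previously attached domain is passed to attach_dev:

	/* struct iommu_domain_ops member shape implied by this hunk */
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
			  struct iommu_domain *old);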
@@ -344,74 +338,6 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
-static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
- unsigned long iova, size_t page_size,
- unsigned long flags)
-{
- unsigned long cur, end = iova + page_size - 1;
- bool dirty = false;
- void *ent, *old;
-
- for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
- continue;
-
- dirty = true;
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- }
-
- return dirty;
-}
-
-static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- unsigned long flags,
- struct iommu_dirty_bitmap *dirty)
-{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- unsigned long end = iova + size;
- void *ent;
-
- if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
- return -EINVAL;
-
- do {
- unsigned long pgsize = MOCK_IO_PAGE_SIZE;
- unsigned long head;
-
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- if (!ent) {
- iova += pgsize;
- continue;
- }
-
- if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
- pgsize = MOCK_HUGE_PAGE_SIZE;
- head = iova & ~(pgsize - 1);
-
- /* Clear dirty */
- if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
- iommu_dirty_bitmap_record(dirty, iova, pgsize);
- iova += pgsize;
- } while (iova < end);
-
- return 0;
-}
-
-static const struct iommu_dirty_ops dirty_ops = {
- .set_dirty_tracking = mock_domain_set_dirty_tracking,
- .read_and_clear_dirty = mock_domain_read_and_clear_dirty,
-};
-
static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
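Nothing is lost by deleting this walker: the hunks below route read_and_clear_dirty through IOMMU_PT_DIRTY_OPS(). Assuming generic_pt follows the usual per-format macro pattern, that expands to something along the lines of:

	/* Hypothetical expansion, for orientation only */
	#define IOMMU_PT_DIRTY_OPS(fmt) \
		.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty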
@@ -446,7 +372,7 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
- if (!parent || parent->ops != mock_ops.default_domain_ops)
+ if (!parent || !(parent->type & __IOMMU_DOMAIN_PAGING))
return ERR_PTR(-EINVAL);
mock_parent = to_mock_domain(parent);
@@ -459,159 +385,170 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
return &mock_nested->domain;
}
-static struct iommu_domain *
-mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
- const struct iommu_user_data *user_data)
-{
- bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
- IOMMU_HWPT_ALLOC_NEST_PARENT |
- IOMMU_HWPT_ALLOC_PASID;
- struct mock_dev *mdev = to_mock_dev(dev);
- bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
- struct mock_iommu_domain *mock;
-
- if (user_data)
- return ERR_PTR(-EOPNOTSUPP);
- if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
- return ERR_PTR(-EOPNOTSUPP);
-
- mock = kzalloc(sizeof(*mock), GFP_KERNEL);
- if (!mock)
- return ERR_PTR(-ENOMEM);
- mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
- mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
- mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
- if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
- mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
- mock->domain.ops = mock_ops.default_domain_ops;
- mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- xa_init(&mock->pfns);
-
- if (has_dirty_flag)
- mock->domain.dirty_ops = &dirty_ops;
- return &mock->domain;
-}
-
static void mock_domain_free(struct iommu_domain *domain)
{
struct mock_iommu_domain *mock = to_mock_domain(domain);
- WARN_ON(!xa_empty(&mock->pfns));
+ pt_iommu_deinit(&mock->iommu);
kfree(mock);
}
-static int mock_domain_map_pages(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t pgsize, size_t pgcount, int prot,
- gfp_t gfp, size_t *mapped)
+static void mock_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- unsigned long flags = MOCK_PFN_START_IOVA;
- unsigned long start_iova = iova;
+ iommu_put_pages_list(&gather->freelist);
+}
- /*
- * xarray does not reliably work with fault injection because it does a
- * retry allocation, so put our own failure point.
- */
- if (iommufd_should_fail())
- return -ENOENT;
+static const struct iommu_domain_ops amdv1_mock_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1_mock),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
- for (; pgcount; pgcount--) {
- size_t cur;
+static const struct iommu_domain_ops amdv1_mock_huge_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1_mock),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
+#undef pt_iommu_amdv1_mock_map_pages
- for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
- void *old;
+static const struct iommu_dirty_ops amdv1_mock_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(amdv1_mock),
+ .set_dirty_tracking = mock_domain_set_dirty_tracking,
+};
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- flags = MOCK_PFN_LAST_IOVA;
- if (pgsize != MOCK_IO_PAGE_SIZE) {
- flags |= MOCK_PFN_HUGE_IOVA;
- }
- old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
- xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
- flags),
- gfp);
- if (xa_is_err(old)) {
- for (; start_iova != iova;
- start_iova += MOCK_IO_PAGE_SIZE)
- xa_erase(&mock->pfns,
- start_iova /
- MOCK_IO_PAGE_SIZE);
- return xa_err(old);
- }
- WARN_ON(old);
- iova += MOCK_IO_PAGE_SIZE;
- paddr += MOCK_IO_PAGE_SIZE;
- *mapped += MOCK_IO_PAGE_SIZE;
- flags = 0;
- }
- }
- return 0;
-}
+static const struct iommu_domain_ops amdv1_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
-static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
- unsigned long iova, size_t pgsize,
- size_t pgcount,
- struct iommu_iotlb_gather *iotlb_gather)
+static const struct iommu_dirty_ops amdv1_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(amdv1),
+ .set_dirty_tracking = mock_domain_set_dirty_tracking,
+};
+
+static struct mock_iommu_domain *
+mock_domain_alloc_pgtable(struct device *dev,
+ const struct iommu_hwpt_selftest *user_cfg, u32 flags)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- bool first = true;
- size_t ret = 0;
- void *ent;
+ struct mock_iommu_domain *mock;
+ int rc;
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
+ mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return ERR_PTR(-ENOMEM);
+ mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- for (; pgcount; pgcount--) {
- size_t cur;
+ mock->amdv1.iommu.nid = NUMA_NO_NODE;
+
+ switch (user_cfg->pagetable_type) {
+ case MOCK_IOMMUPT_DEFAULT:
+ case MOCK_IOMMUPT_HUGE: {
+ struct pt_iommu_amdv1_cfg cfg = {};
+
+ /* The mock version has a 2k page size */
+ cfg.common.hw_max_vasz_lg2 = 56;
+ cfg.common.hw_max_oasz_lg2 = 51;
+ cfg.starting_level = 2;
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
+ mock->domain.ops = &amdv1_mock_huge_ops;
+ else
+ mock->domain.ops = &amdv1_mock_ops;
+ rc = pt_iommu_amdv1_mock_init(&mock->amdv1, &cfg, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+
+ /*
+ * In huge mode userspace should only provide huge pages, but we
+ * have to include PAGE_SIZE for the domain to be accepted by
+ * iommufd.
+ */
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
+ mock->domain.pgsize_bitmap = MOCK_HUGE_PAGE_SIZE |
+ PAGE_SIZE;
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ mock->domain.dirty_ops = &amdv1_mock_dirty_ops;
+ break;
+ }
- for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
- ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ case MOCK_IOMMUPT_AMDV1: {
+ struct pt_iommu_amdv1_cfg cfg = {};
+
+ cfg.common.hw_max_vasz_lg2 = 64;
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
+ BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
+ cfg.starting_level = 2;
+ mock->domain.ops = &amdv1_ops;
+ rc = pt_iommu_amdv1_init(&mock->amdv1, &cfg, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ mock->domain.dirty_ops = &amdv1_dirty_ops;
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto err_free;
+ }
- /*
- * iommufd generates unmaps that must be a strict
- * superset of the maps performed. So every
- * starting/ending IOVA should have been an iova passed
- * to map.
- *
- * This simple logic doesn't work when the HUGE_PAGE is
- * turned on since the core code will automatically
- * switch between the two page sizes creating a break in
- * the unmap calls. The break can land in the middle of
- * contiguous IOVA.
- */
- if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
- }
- if (pgcount == 1 &&
- cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
- }
+ /*
+ * Override the real aperture to the MOCK aperture for test purposes.
+ */
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_DEFAULT) {
+ WARN_ON(mock->domain.geometry.aperture_start != 0);
+ WARN_ON(mock->domain.geometry.aperture_end < MOCK_APERTURE_LAST);
- iova += MOCK_IO_PAGE_SIZE;
- ret += MOCK_IO_PAGE_SIZE;
- }
+ mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+ mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
}
- return ret;
+
+ return mock;
+err_free:
+ kfree(mock);
+ return ERR_PTR(rc);
}
-static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+static struct iommu_domain *
+mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- void *ent;
+ bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_ALLOC_PASID;
+ struct mock_dev *mdev = to_mock_dev(dev);
+ bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+ struct iommu_hwpt_selftest user_cfg = {};
+ struct mock_iommu_domain *mock;
+ int rc;
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- WARN_ON(!ent);
- return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
+ if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (user_data && (user_data->type != IOMMU_HWPT_DATA_SELFTEST &&
+ user_data->type != IOMMU_HWPT_DATA_NONE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (user_data) {
+ rc = iommu_copy_struct_from_user(
+ &user_cfg, user_data, IOMMU_HWPT_DATA_SELFTEST, iotlb);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
+ mock = mock_domain_alloc_pgtable(dev, &user_cfg, flags);
+ if (IS_ERR(mock))
+ return ERR_CAST(mock);
+ return &mock->domain;
}
static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
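With pagetable_type now selectable, a test can ask for the real AMDv1 layout at HWPT allocation time. A hedged sketch of the userspace side (IDs are placeholders; the pagetable_type member of struct iommu_hwpt_selftest is assumed from the copy_struct_from_user() call above, and IOMMU_TEST_IOTLB_DEFAULT comes from the selftest header):

	/* iommufd, stdev_id and ioas_id are set up elsewhere */
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
		.pagetable_type = MOCK_IOMMUPT_AMDV1,
	};
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
		.dev_id = stdev_id,	/* placeholder mock device ID */
		.pt_id = ioas_id,	/* placeholder IOAS ID */
		.data_type = IOMMU_HWPT_DATA_SELFTEST,
		.data_len = sizeof(data),
		.data_uptr = (uintptr_t)&data,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		err(1, "IOMMU_HWPT_ALLOC");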
@@ -955,15 +892,6 @@ static const struct iommu_ops mock_ops = {
.user_pasid_table = true,
.get_viommu_size = mock_get_viommu_size,
.viommu_init = mock_viommu_init,
- .default_domain_ops =
- &(struct iommu_domain_ops){
- .free = mock_domain_free,
- .attach_dev = mock_domain_nop_attach,
- .map_pages = mock_domain_map_pages,
- .unmap_pages = mock_domain_unmap_pages,
- .iova_to_phys = mock_domain_iova_to_phys,
- .set_dev_pasid = mock_domain_set_dev_pasid_nop,
- },
};
static void mock_domain_free_nested(struct iommu_domain *domain)
@@ -1047,7 +975,7 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
if (IS_ERR(hwpt))
return hwpt;
if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
- hwpt->domain->ops != mock_ops.default_domain_ops) {
+ hwpt->domain->owner != &mock_ops) {
iommufd_put_object(ucmd->ictx, &hwpt->obj);
return ERR_PTR(-EINVAL);
}
@@ -1088,7 +1016,6 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{},
};
const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
- MOCK_FLAGS_DEVICE_HUGE_IOVA |
MOCK_FLAGS_DEVICE_PASID;
struct mock_dev *mdev;
int rc, i;
@@ -1277,23 +1204,25 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
{
struct iommufd_hw_pagetable *hwpt;
struct mock_iommu_domain *mock;
+ unsigned int page_size;
uintptr_t end;
int rc;
- if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
- (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
- check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
- return -EINVAL;
-
hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
if (IS_ERR(hwpt))
return PTR_ERR(hwpt);
- for (; length; length -= MOCK_IO_PAGE_SIZE) {
+ page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
+ if (iova % page_size || length % page_size ||
+ (uintptr_t)uptr % page_size ||
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
+ return -EINVAL;
+
+ for (; length; length -= page_size) {
struct page *pages[1];
+ phys_addr_t io_phys;
unsigned long pfn;
long npages;
- void *ent;
npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
pages);
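A detail worth noting: with no fixed MOCK_IO_PAGE_SIZE left, the check granule is derived from the domain itself. Since __ffs() returns the index of the lowest set bit, 1 << __ffs(bitmap) is the smallest page size the domain advertises, e.g.:

	unsigned long bitmap = SZ_2M | SZ_4K;		/* example pgsize_bitmap */
	unsigned int page_size = 1 << __ffs(bitmap);	/* == SZ_4K */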
@@ -1308,15 +1237,14 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
pfn = page_to_pfn(pages[0]);
put_page(pages[0]);
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- if (!ent ||
- (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
- pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
+ io_phys = mock->domain.ops->iova_to_phys(&mock->domain, iova);
+ if (io_phys !=
+ pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
rc = -EINVAL;
goto out_put;
}
- iova += MOCK_IO_PAGE_SIZE;
- uptr += MOCK_IO_PAGE_SIZE;
+ iova += page_size;
+ uptr += page_size;
}
rc = 0;
@@ -1795,7 +1723,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
if (IS_ERR(hwpt))
return PTR_ERR(hwpt);
- if (!(mock->flags & MOCK_DIRTY_TRACK)) {
+ if (!(mock->flags & MOCK_DIRTY_TRACK) || !mock->iommu.ops->set_dirty) {
rc = -EINVAL;
goto out_put;
}
@@ -1814,22 +1742,10 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
}
for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * page_size;
- void *ent, *old;
-
if (!test_bit(i, (unsigned long *)tmp))
continue;
-
- ent = xa_load(&mock->pfns, cur / page_size);
- if (ent) {
- unsigned long val;
-
- val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns, cur / page_size,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- count++;
- }
+ mock->iommu.ops->set_dirty(&mock->iommu, iova + i * page_size);
+ count++;
}
cmd->dirty.out_nr_dirty = count;
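Dirty injection now defers to the page table implementation. From the call site one can infer a per-format op shaped roughly like the following (an inference from this hunk, not the published generic_pt interface; the return type is assumed):

	/* Inferred shape of the op used above */
	int (*set_dirty)(struct pt_iommu *iommu_table, dma_addr_t iova);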
@@ -2031,6 +1947,140 @@ void iommufd_selftest_destroy(struct iommufd_object *obj)
}
}
+struct iommufd_test_dma_buf {
+ void *memory;
+ size_t length;
+ bool revoked;
+};
+
+static int iommufd_test_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ return 0;
+}
+
+static void iommufd_test_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+}
+
+static struct sg_table *
+iommufd_test_dma_buf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void iommufd_test_dma_buf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+}
+
+static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct iommufd_test_dma_buf *priv = dmabuf->priv;
+
+ kfree(priv->memory);
+ kfree(priv);
+}
+
+static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
+ .attach = iommufd_test_dma_buf_attach,
+ .detach = iommufd_test_dma_buf_detach,
+ .map_dma_buf = iommufd_test_dma_buf_map,
+ .release = iommufd_test_dma_buf_release,
+ .unmap_dma_buf = iommufd_test_dma_buf_unmap,
+};
+
+int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys)
+{
+ struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
+
+ dma_resv_assert_held(attachment->dmabuf->resv);
+
+ if (attachment->dmabuf->ops != &iommufd_test_dmabuf_ops)
+ return -EOPNOTSUPP;
+
+ if (priv->revoked)
+ return -ENODEV;
+
+ phys->paddr = virt_to_phys(priv->memory);
+ phys->len = priv->length;
+ return 0;
+}
+
+static int iommufd_test_dmabuf_get(struct iommufd_ucmd *ucmd,
+ unsigned int open_flags,
+ size_t len)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct iommufd_test_dma_buf *priv;
+ struct dma_buf *dmabuf;
+ int rc;
+
+ len = ALIGN(len, PAGE_SIZE);
+ if (len == 0 || len > PAGE_SIZE * 512)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->length = len;
+ priv->memory = kzalloc(len, GFP_KERNEL);
+ if (!priv->memory) {
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ exp_info.ops = &iommufd_test_dmabuf_ops;
+ exp_info.size = len;
+ exp_info.flags = open_flags;
+ exp_info.priv = priv;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ rc = PTR_ERR(dmabuf);
+ goto err_free;
+ }
+
+ return dma_buf_fd(dmabuf, open_flags);
+
+err_free:
+ kfree(priv->memory);
+ kfree(priv);
+ return rc;
+}
+
+static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd,
+ bool revoked)
+{
+ struct iommufd_test_dma_buf *priv;
+ struct dma_buf *dmabuf;
+ int rc = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (dmabuf->ops != &iommufd_test_dmabuf_ops) {
+ rc = -EOPNOTSUPP;
+ goto err_put;
+ }
+
+ priv = dmabuf->priv;
+ dma_resv_lock(dmabuf->resv, NULL);
+ priv->revoked = revoked;
+ dma_buf_move_notify(dmabuf);
+ dma_resv_unlock(dmabuf->resv);
+
+err_put:
+ dma_buf_put(dmabuf);
+ return rc;
+}
+
int iommufd_test(struct iommufd_ucmd *ucmd)
{
struct iommu_test_cmd *cmd = ucmd->cmd;
@@ -2109,6 +2159,13 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
return iommufd_test_pasid_detach(ucmd, cmd);
case IOMMU_TEST_OP_PASID_CHECK_HWPT:
return iommufd_test_pasid_check_hwpt(ucmd, cmd);
+ case IOMMU_TEST_OP_DMABUF_GET:
+ return iommufd_test_dmabuf_get(ucmd, cmd->dmabuf_get.open_flags,
+ cmd->dmabuf_get.length);
+ case IOMMU_TEST_OP_DMABUF_REVOKE:
+ return iommufd_test_dmabuf_revoke(ucmd,
+ cmd->dmabuf_revoke.dmabuf_fd,
+ cmd->dmabuf_revoke.revoked);
default:
return -EOPNOTSUPP;
}
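The revoke op exercises the pattern iommufd expects from revocable exporters: flip the private revoked flag under the reservation lock, then broadcast dma_buf_move_notify(), after which iommufd_test_dma_buf_iommufd_map() fails with -ENODEV until the flag is cleared. A hedged userspace sketch of driving it (the dmabuf_revoke layout follows the dispatch above; IOMMU_TEST_CMD is assumed to be the selftest ioctl number):

	/* 'fd' is a dmabuf previously returned by IOMMU_TEST_OP_DMABUF_GET */
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DMABUF_REVOKE,
		.dmabuf_revoke = { .dmabuf_fd = fd, .revoked = true },
	};

	if (ioctl(iommufd, IOMMU_TEST_CMD, &cmd))
		err(1, "IOMMU_TEST_OP_DMABUF_REVOKE");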
@@ -2202,3 +2259,5 @@ void iommufd_test_exit(void)
platform_device_unregister(selftest_iommu_dev);
debugfs_remove_recursive(dbgfs_root);
}
+
+MODULE_IMPORT_NS("GENERIC_PT_IOMMU");