Diffstat (limited to 'drivers/iommu/riscv/iommu.c')
-rw-r--r-- | drivers/iommu/riscv/iommu.c | 43
1 file changed, 22 insertions, 21 deletions
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8f049d4a0e2c..bb57092ca901 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
 /* Device resource-managed allocations */
 struct riscv_iommu_devres {
 	void *addr;
-	int order;
 };
 
 static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
 {
 	struct riscv_iommu_devres *devres = res;
 
-	iommu_free_pages(devres->addr, devres->order);
+	iommu_free_pages(devres->addr);
 }
 
 static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@@ -66,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
 	return devres->addr == target->addr;
 }
 
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+				   unsigned int size)
 {
 	struct riscv_iommu_devres *devres;
 	void *addr;
 
-	addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
-				      GFP_KERNEL_ACCOUNT, order);
+	addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+					 GFP_KERNEL_ACCOUNT, size);
 	if (unlikely(!addr))
 		return NULL;
 
@@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
 			      sizeof(struct riscv_iommu_devres), GFP_KERNEL);
 
 	if (unlikely(!devres)) {
-		iommu_free_pages(addr, order);
+		iommu_free_pages(addr);
 		return NULL;
 	}
 
 	devres->addr = addr;
-	devres->order = order;
 
 	devres_add(iommu->dev, devres);
 
@@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
 	} else {
 		do {
 			const size_t queue_size = entry_size << (logsz + 1);
-			const int order = get_order(queue_size);
 
-			queue->base = riscv_iommu_get_pages(iommu, order);
+			queue->base = riscv_iommu_get_pages(
+				iommu, max(queue_size, SZ_4K));
 			queue->phys = __pa(queue->base);
 		} while (!queue->base && logsz-- > 0);
 	}
@@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
 			break;
 		}
 
-		ptr = riscv_iommu_get_pages(iommu, 0);
+		ptr = riscv_iommu_get_pages(iommu, SZ_4K);
 		if (!ptr)
 			return NULL;
 
@@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
 	}
 
 	if (!iommu->ddt_root) {
-		iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+		iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
 		iommu->ddt_phys = __pa(iommu->ddt_root);
 	}
 
@@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain,
 #define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
 
 static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
-				 unsigned long pte, struct list_head *freelist)
+				 unsigned long pte,
+				 struct iommu_pages_list *freelist)
 {
 	unsigned long *ptr;
 	int i;
@@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
 	}
 
 	if (freelist)
-		list_add_tail(&virt_to_page(ptr)->lru, freelist);
+		iommu_pages_list_add(freelist, ptr);
 	else
-		iommu_free_page(ptr);
+		iommu_free_pages(ptr);
 }
 
 static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1144,13 +1144,14 @@ pte_retry:
 		 * page table. This might race with other mappings, retry.
 		 */
 		if (_io_pte_none(pte)) {
-			addr = iommu_alloc_page_node(domain->numa_node, gfp);
+			addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+							 SZ_4K);
 			if (!addr)
 				return NULL;
 			old = pte;
 			pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
 			if (cmpxchg_relaxed(ptr, old, pte) != old) {
-				iommu_free_page(addr);
+				iommu_free_pages(addr);
 				goto pte_retry;
 			}
 		}
@@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
 	unsigned long *ptr;
 	unsigned long pte, old, pte_prot;
 	int rc = 0;
-	LIST_HEAD(freelist);
+	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 
 	if (!(prot & IOMMU_WRITE))
 		pte_prot = _PAGE_BASE | _PAGE_READ;
@@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
 
 	*mapped = size;
 
-	if (!list_empty(&freelist)) {
+	if (!iommu_pages_list_empty(&freelist)) {
 		/*
 		 * In 1.0 spec version, the smallest scope we can use to
 		 * invalidate all levels of page table (i.e. leaf and non-leaf)
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
 	domain->numa_node = dev_to_node(iommu->dev);
 	domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
 	domain->pgd_mode = pgd_mode;
-	domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
-						 GFP_KERNEL_ACCOUNT);
+	domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+						     GFP_KERNEL_ACCOUNT, SZ_4K);
 	if (!domain->pgd_root) {
 		kfree(domain);
 		return ERR_PTR(-ENOMEM);
@@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
 	domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
 					RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
 	if (domain->pscid < 0) {
-		iommu_free_page(domain->pgd_root);
+		iommu_free_pages(domain->pgd_root);
 		kfree(domain);
 		return ERR_PTR(-ENOMEM);
 	}
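Note on the conversion: the allocator's unit changes from a page order to a byte size. iommu_alloc_pages_node_sz() takes the size in bytes (floored at SZ_4K here) and records it with the allocation, so iommu_free_pages() no longer needs the order replayed back to it; that is why struct riscv_iommu_devres can drop its order field. A minimal sketch of the new calling convention, not part of the patch: the example_* helpers and the "nid"/"queue_size" parameters are illustrative, and the "../iommu-pages.h" include path assumes the sketch sits next to the private header the way this driver does.

	#include <linux/minmax.h>
	#include <linux/sizes.h>
	#include "../iommu-pages.h"

	/* Illustrative only: size-based allocation with a one-page floor. */
	static void *example_alloc_queue(int nid, size_t queue_size)
	{
		/* was: iommu_alloc_pages_node(nid, gfp, get_order(queue_size)) */
		return iommu_alloc_pages_node_sz(nid, GFP_KERNEL_ACCOUNT,
						 max(queue_size, SZ_4K));
	}

	static void example_free_queue(void *base)
	{
		/* was: iommu_free_pages(base, order) -- no order tracking now */
		iommu_free_pages(base);
	}

In the driver itself the free side runs through devres (riscv_iommu_devres_pages_release), which is what lets the order field disappear from the devres record.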
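The freelist hunks replace a list_head threaded through struct page (virt_to_page(ptr)->lru) with the dedicated struct iommu_pages_list type. A hedged sketch of the resulting gather-then-flush pattern: the example_* helpers are illustrative, and the batch free via iommu_put_pages_list() is assumed from the common iommu-pages API rather than shown in these hunks.

	#include "../iommu-pages.h"

	/* Queue a dead table page for deferred freeing, or free it now. */
	static void example_free_table(void *table_page,
				       struct iommu_pages_list *freelist)
	{
		if (freelist)
			iommu_pages_list_add(freelist, table_page);
		else
			iommu_free_pages(table_page);
	}

	/* Release the batch only after the IOTLB invalidation completes. */
	static void example_unmap_flush(struct iommu_pages_list *freelist)
	{
		if (!iommu_pages_list_empty(freelist)) {
			/* ...invalidate leaf and non-leaf table levels... */
			iommu_put_pages_list(freelist);
		}
	}

The list is declared on the stack exactly as in the riscv_iommu_map_pages() hunk: struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);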
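The pte_retry hunk keeps the driver's lock-free install of intermediate tables: a fresh 4K table is published with cmpxchg_relaxed(), and the loser of a concurrent install returns its page through the single-argument iommu_free_pages(). A sketch of that step in isolation, assuming it lives inside iommu.c: example_install_table() is hypothetical, while _io_pte_entry(), _PAGE_TABLE, and the domain fields are the driver's own.

	/* Illustrative only: publish a new non-leaf entry without locks. */
	static unsigned long *example_install_table(struct riscv_iommu_domain *domain,
						    unsigned long *ptr, gfp_t gfp)
	{
		unsigned long old = READ_ONCE(*ptr), pte;
		void *addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
						       SZ_4K);

		if (!addr)
			return NULL;
		pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
		if (cmpxchg_relaxed(ptr, old, pte) != old) {
			iommu_free_pages(addr);	/* lost the race; caller retries */
			return NULL;
		}
		return addr;
	}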