Diffstat (limited to 'drivers')
-rw-r--r--  drivers/parisc/ccio-dma.c       | 54
-rw-r--r--  drivers/parisc/iommu-helpers.h  | 10
-rw-r--r--  drivers/parisc/sba_iommu.c      | 54
-rw-r--r--  drivers/xen/grant-dma-ops.c     | 20
-rw-r--r--  drivers/xen/swiotlb-xen.c       | 63
5 files changed, 100 insertions(+), 101 deletions(-)
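
Editor's note: every file below is converted from the page/offset-based .map_page/.unmap_page callbacks to the phys_addr_t-based .map_phys/.unmap_phys callbacks. A minimal sketch of the callback shape, with signatures taken from the hunks that follow; the my_* names and the placeholder bodies are illustrative only and not part of the patch:

#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>

static dma_addr_t my_map_phys(struct device *dev, phys_addr_t phys,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/* Implementations that cannot map MMIO space bail out early. */
	if (unlikely(attrs & DMA_ATTR_MMIO))
		return DMA_MAPPING_ERROR;

	/* ... set up the translation for [phys, phys + size) ... */
	return DMA_MAPPING_ERROR;	/* placeholder */
}

static void my_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
			  enum dma_data_direction dir, unsigned long attrs)
{
	/* ... tear down the translation previously set up for iova ... */
}

static const struct dma_map_ops my_dma_ops = {
	.map_phys	= my_map_phys,
	.unmap_phys	= my_unmap_phys,
};
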
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index feef537257d0..4e7071714356 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
* ccio_io_pdir_entry - Initialize an I/O Pdir.
* @pdir_ptr: A pointer into I/O Pdir.
* @sid: The Space Identifier.
- * @vba: The virtual address.
+ * @pba: The physical address.
* @hints: The DMA Hint.
*
- * Given a virtual address (vba, arg2) and space id, (sid, arg1),
+ * Given a physical address (pba, arg2) and space id, (sid, arg1),
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
* entry consists of 8 bytes as shown below (MSB == bit 0):
*
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hints)
{
register unsigned long pa;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = lpa(vba);
+ pa = pba;
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
/**
* ccio_map_single - Map an address range into the IOMMU.
* @dev: The PCI device.
- * @addr: The start address of the DMA region.
+ * @addr: The physical address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_map_single function.
*/
static dma_addr_t
-ccio_map_single(struct device *dev, void *addr, size_t size,
+ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
int idx;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(size <= 0);
/* save offset bits */
- offset = ((unsigned long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = ALIGN(size + offset, IOVP_SIZE);
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() %px -> %#lx size: %zu\n",
- __func__, addr, (long)(iovp | offset), size);
+ DBG_RUN("%s() %pa -> %#lx size: %zu\n",
+ __func__, &addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
- if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
+ if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
hint |= HINT_SAFE_DMA;
while(size > 0) {
- ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
+ ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
DBG_RUN(" pdir %p %08x%08x\n",
pdir_start,
@@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return ccio_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return ccio_map_single(dev, phys, size, direction);
}
/**
- * ccio_unmap_page - Unmap an address range from the IOMMU.
+ * ccio_unmap_phys - Unmap an address range from the IOMMU.
* @dev: The PCI device.
* @iova: The start address of the DMA region.
* @size: The length of the DMA region.
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
* @attrs: attributes
*/
static void
-ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
if (ret) {
memset(ret, 0, size);
- *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
+ *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
+ DMA_BIDIRECTIONAL);
}
return ret;
@@ -873,7 +875,7 @@ static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, 0);
+ ccio_unmap_phys(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
- sg_virt(sglist), sglist->length,
+ sg_phys(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
- ccio_unmap_page(dev, sg_dma_address(sglist),
+ ccio_unmap_phys(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
nents--;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
.free = ccio_free,
- .map_page = ccio_map_page,
- .unmap_page = ccio_unmap_page,
+ .map_phys = ccio_map_phys,
+ .unmap_phys = ccio_unmap_phys,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
- /* KLUGE - unmap_sg calls unmap_page for each mapped page */
+ /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
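
Editor's note: the ccio-dma.c hunks above make the mapping path take a phys_addr_t end to end. A condensed sketch of the converted flow, assuming the IOVP_SIZE/IOVP_SHIFT/KERNEL_SPACE definitions and struct ioc from ccio-dma.c; alloc_range_idx() is a hypothetical stand-in for the driver's real range allocator:

static dma_addr_t ccio_map_single_sketch(struct ioc *ioc, phys_addr_t addr,
					 size_t size, unsigned long hint)
{
	unsigned long offset = offset_in_page(addr);	/* keep sub-page offset */
	__le64 *pdir_start;
	int idx;

	size = ALIGN(size + offset, IOVP_SIZE);		/* whole IO pages */
	idx = alloc_range_idx(ioc, size);		/* hypothetical helper */
	pdir_start = &ioc->pdir_base[idx];

	while (size > 0) {
		/* each entry now takes the physical address directly */
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	return ((dma_addr_t)idx << IOVP_SHIFT) | offset;
}
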
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index c43f1a212a5c..0691884f5095 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,7 +14,7 @@
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
dma_sg--;
while (nents-- > 0) {
- unsigned long vaddr;
+ phys_addr_t paddr;
long size;
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
BUG_ON(pdirp == NULL);
- vaddr = (unsigned long)sg_virt(startsg);
+ paddr = sg_phys(startsg);
sg_dma_len(dma_sg) += startsg->length;
size = startsg->length + dma_offset;
dma_offset = 0;
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
#endif
do {
iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
- vaddr, hint);
- vaddr += IOVP_SIZE;
+ paddr, hint);
+ paddr += IOVP_SIZE;
size -= IOVP_SIZE;
pdirp++;
} while(unlikely(size > 0));
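
Editor's note: with the helper change above, the per-segment fill loop advances a physical address instead of a kernel virtual one. A minimal restatement of that inner loop in the context of iommu-helpers.h; fill_entry() stands in for the iommu_io_pdir_entry callback the real code receives:

static void fill_sg_segment(__le64 *pdirp, struct scatterlist *sg,
			    unsigned long hint,
			    void (*fill_entry)(__le64 *, space_t, phys_addr_t,
					       unsigned long))
{
	phys_addr_t paddr = sg_phys(sg);	/* was sg_virt() before */
	long size = sg->length;

	do {
		fill_entry(pdirp++, KERNEL_SPACE, paddr, hint);
		paddr += IOVP_SIZE;
		size -= IOVP_SIZE;
	} while (size > 0);
}
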
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index fc3863c09f83..a6eb6bffa5ea 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @sid: process Space ID - currently only support KERNEL_SPACE
- * @vba: Virtual CPU address of buffer to map
+ * @pba: Physical address of buffer to map
* @hint: DMA hint set to use for this mapping
*
* SBA Mapping Routine
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
*/
static void
-sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hint)
{
- u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = lpa(vba);
- pa &= IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
+ pba &= IOVP_MASK;
+ pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
- asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
- pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
-
- pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
- *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
+ pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+ *pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
* See Documentation/core-api/dma-api-howto.rst
*/
static dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size,
+sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
struct ioc *ioc;
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
return DMA_MAPPING_ERROR;
/* save offset bits */
- offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
pide = sba_alloc_range(ioc, dev, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
- DBG_RUN("%s() 0x%p -> 0x%lx\n",
- __func__, addr, (long) iovp | offset);
+ DBG_RUN("%s() 0x%pa -> 0x%lx\n",
+ __func__, &addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
while (size > 0) {
- sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
+ sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-sba_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return sba_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return sba_map_single(dev, phys, size, direction);
}
/**
- * sba_unmap_page - unmap one IOVA and free resources
+ * sba_unmap_phys - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
* See Documentation/core-api/dma-api-howto.rst
*/
static void
-sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
if (ret) {
memset(ret, 0, size);
- *dma_handle = sba_map_single(hwdev, ret, size, 0);
+ *dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
}
return ret;
@@ -914,7 +912,7 @@ static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, 0);
+ sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
- sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
+ sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (nents && sg_dma_len(sglist)) {
- sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+ sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
.free = sba_free,
- .map_page = sba_map_page,
- .unmap_page = sba_unmap_page,
+ .map_phys = sba_map_phys,
+ .unmap_phys = sba_unmap_phys,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
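
Editor's note: for sba_iommu.c the interesting detail is how an IO PDIR entry is now assembled from the physical address; the coherence index still needs a kernel virtual address for the LCI instruction, hence the phys_to_virt() in the asm operand. A plain-C restatement of the bit layout from the sba_io_pdir_entry() hunk, for readability only (the real code must use the lci/depw instructions shown above):

static u64 sba_pdir_entry_value(phys_addr_t pba, unsigned int ci)
{
	u64 val = pba & IOVP_MASK;		/* page-aligned physical address */

	val |= (ci >> PAGE_SHIFT) & 0xff;	/* coherence index in low byte */
	val |= SBA_PDIR_VALID_BIT;		/* mark the entry valid */
	return val;				/* stored with cpu_to_le64() */
}
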
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 29257d2639db..14077d23f2a1 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *dev, size_t size,
xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}
-static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
+static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
+ unsigned long offset = offset_in_page(phys);
unsigned long dma_offset = xen_offset_in_page(offset),
pfn_offset = XEN_PFN_DOWN(offset);
unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
if (WARN_ON(dir == DMA_NONE))
return DMA_MAPPING_ERROR;
@@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
- pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+ pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
dir == DMA_TO_DEVICE);
}
@@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
return dma_handle;
}
-static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
return;
for_each_sg(sg, s, nents, i)
- xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+ xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
attrs);
}
@@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
return -EINVAL;
for_each_sg(sg, s, nents, i) {
- s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
s->length, dir, attrs);
if (s->dma_address == DMA_MAPPING_ERROR)
goto out;
@@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.free_pages = xen_grant_dma_free_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .map_page = xen_grant_dma_map_page,
- .unmap_page = xen_grant_dma_unmap_page,
+ .map_phys = xen_grant_dma_map_phys,
+ .unmap_phys = xen_grant_dma_unmap_phys,
.map_sg = xen_grant_dma_map_sg,
.unmap_sg = xen_grant_dma_unmap_sg,
.dma_supported = xen_grant_dma_supported,
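
Editor's note: a reduced shape of xen_grant_dma_map_phys() above. Grant mappings need real pages behind the address, so DMA_ATTR_MMIO is rejected, and the page used for granting is now derived from the physical address; error handling and the per-page grant loop are elided, see the full hunk for details:

static dma_addr_t grant_map_phys_sketch(struct device *dev, phys_addr_t phys,
					size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	unsigned long offset = offset_in_page(phys);
	unsigned int n_pages = XEN_PFN_UP(xen_offset_in_page(offset) + size);
	struct page *page = phys_to_page(phys);	/* was the page argument */

	if (unlikely(attrs & DMA_ATTR_MMIO))	/* no struct page to grant */
		return DMA_MAPPING_ERROR;

	/* grant n_pages Xen pages starting at page, then build the handle */
	(void)page; (void)n_pages; (void)dir;
	return DMA_MAPPING_ERROR;		/* placeholder */
}
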
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de87..ccf25027bec1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
*/
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+ dma_addr_t dev_addr;
+ phys_addr_t map;
BUG_ON(dir == DMA_NONE);
+
+ if (attrs & DMA_ATTR_MMIO) {
+ if (unlikely(!dma_capable(dev, phys, size, false))) {
+ dev_err_once(
+ dev,
+ "DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+ &phys, size, *dev->dma_mask,
+ dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+ return phys;
+ }
+
+ dev_addr = xen_phys_to_dma(dev, phys);
+
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ done:
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
*/
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
dir, attrs);
}
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
- sg->offset, sg->length, dir, attrs);
+ sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+ sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = paddr;
-
- if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- dev_err_once(dev,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- WARN_ON_ONCE(1);
- return DMA_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg,
.unmap_sg = xen_swiotlb_unmap_sg,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
+ .map_phys = xen_swiotlb_map_phys,
+ .unmap_phys = xen_swiotlb_unmap_phys,
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = xen_swiotlb_direct_map_resource,
};
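
Editor's note: the removed xen_swiotlb_direct_map_resource() is effectively folded into xen_swiotlb_map_phys() above: with DMA_ATTR_MMIO the physical address is passed through unbounced after a dma_capable() check. A condensed view of that path only (the non-MMIO bounce-buffer path is unchanged and elided):

static dma_addr_t xen_swiotlb_mmio_path_sketch(struct device *dev,
					       phys_addr_t phys, size_t size)
{
	if (unlikely(!dma_capable(dev, phys, size, false))) {
		WARN_ON_ONCE(1);	/* device cannot reach this address */
		return DMA_MAPPING_ERROR;
	}
	return phys;	/* MMIO: no bouncing, no cache maintenance */
}
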