summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Jason Wang <jasowang@redhat.com>  2025-08-21 14:46:34 +0800
committer Michael S. Tsirkin <mst@redhat.com>  2025-10-01 07:24:43 -0400
commit    447beec8065b7505c09d1c16257ff9057ec29672 (patch)
tree      2bb2fb272a75fc3348edc9563d77a9550a851fe3
parent    7d096cb3e16f09d9762ae8cef897cdbc13a40029 (diff)
virtio_ring: switch to use dma_{map|unmap}_page()
This patch switches to use dma_{map|unmap}_page() to reduce the coverage
of DMA operations. This would help for the following rework on the virtio
map operations.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20250821064641.5025-3-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
-rw-r--r--  drivers/virtio/virtio_ring.c | 55
1 file changed, 23 insertions(+), 32 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 103bad8cffff..75e5f6336c8d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -405,8 +405,8 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
if (!vq->use_dma_api)
return (dma_addr_t)virt_to_phys(cpu_addr);
- return dma_map_single(vring_dma_dev(vq),
- cpu_addr, size, direction);
+ return virtqueue_dma_map_single_attrs(&vq->vq, cpu_addr,
+ size, direction, 0);
}
static int vring_mapping_error(const struct vring_virtqueue *vq,
@@ -451,22 +451,14 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
if (flags & VRING_DESC_F_INDIRECT) {
if (!vq->use_dma_api)
goto out;
+ } else if (!vring_need_unmap_buffer(vq, extra))
+ goto out;
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr,
- extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- if (!vring_need_unmap_buffer(vq, extra))
- goto out;
-
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr,
- extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
+ dma_unmap_page(vring_dma_dev(vq),
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
out:
return extra->next;
@@ -1276,20 +1268,13 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
if (flags & VRING_DESC_F_INDIRECT) {
if (!vq->use_dma_api)
return;
+ } else if (!vring_need_unmap_buffer(vq, extra))
+ return;
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- if (!vring_need_unmap_buffer(vq, extra))
- return;
-
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
+ dma_unmap_page(vring_dma_dev(vq),
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -3161,7 +3146,13 @@ dma_addr_t virtqueue_dma_map_single_attrs(const struct virtqueue *_vq, void *ptr
return (dma_addr_t)virt_to_phys(ptr);
}
- return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(&_vq->vdev->dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
+
+ return dma_map_page_attrs(vring_dma_dev(vq), virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
@@ -3186,7 +3177,7 @@ void virtqueue_dma_unmap_single_attrs(const struct virtqueue *_vq,
if (!vq->use_dma_api)
return;
- dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+ dma_unmap_page_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);