author     Pavel Begunkov <asml.silence@gmail.com>  2024-11-29 13:34:28 +0000
committer  Jens Axboe <axboe@kernel.dk>             2024-12-23 08:17:16 -0700
commit     c4d0ac1c1567ee822529124a3dc10b384838c3bc (patch)
tree       9655ea72d41aed37f8d3f414432b57b02d4ecd1a /io_uring/memmap.c
parent     226ae1b4d1111b0b0041677b58371af9b8cd31a9 (diff)
io_uring/memmap: optimise single folio regions
We don't need to vmap if memory is already physically contiguous. There
are two important cases it covers: PAGE_SIZE regions and huge pages.
Use io_check_coalesce_buffer() to get the number of contiguous folios.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d5240af23064a824c29d14d2406f1ae764bf4505.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
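io_check_coalesce_buffer() itself is not part of this diff; all that matters here is that it works at folio granularity and reports how many folios back the pinned page array, and that a single folio means one physically contiguous range, so page_address() of the first page is already a usable kernel address. As a rough illustration only, here is a hypothetical, stripped-down contiguity check in the same spirit; it is not the real io_uring helper, which additionally fills struct io_imu_folio_data:

#include <linux/mm.h>

/*
 * Hypothetical stand-in for the question the patch cares about:
 * are all pinned pages physically consecutive?  The real
 * io_check_coalesce_buffer() answers it per folio and fills
 * struct io_imu_folio_data; ifd.nr_folios == 1 is the case that
 * lets io_region_init_ptr() below skip vmap().
 */
static bool region_pages_contiguous(struct page **pages, int nr_pages)
{
	int i;

	for (i = 1; i < nr_pages; i++) {
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			return false;
	}
	return true;
}

The payoff is that PAGE_SIZE regions and huge-page-backed regions no longer take the vmap() path or consume vmalloc address space at all.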
Diffstat (limited to 'io_uring/memmap.c')
-rw-r--r--  io_uring/memmap.c  29
1 file changed, 22 insertions, 7 deletions
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 96c4f6b61171..fd348c98f64f 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -226,12 +226,31 @@ void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
 	memset(mr, 0, sizeof(*mr));
 }
 
+static int io_region_init_ptr(struct io_mapped_region *mr)
+{
+	struct io_imu_folio_data ifd;
+	void *ptr;
+
+	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
+		if (ifd.nr_folios == 1) {
+			mr->ptr = page_address(mr->pages[0]);
+			return 0;
+		}
+	}
+	ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	mr->ptr = ptr;
+	mr->flags |= IO_REGION_F_VMAP;
+	return 0;
+}
+
 int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 		     struct io_uring_region_desc *reg)
 {
 	struct page **pages;
 	int nr_pages, ret;
-	void *vptr;
 	u64 end;
 
 	if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
@@ -267,13 +286,9 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 	mr->pages = pages;
 	mr->flags |= IO_REGION_F_USER_PROVIDED;
 
-	vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	if (!vptr) {
-		ret = -ENOMEM;
+	ret = io_region_init_ptr(mr);
+	if (ret)
 		goto out_free;
-	}
-	mr->ptr = vptr;
-	mr->flags |= IO_REGION_F_VMAP;
 	return 0;
 out_free:
 	io_free_region(ctx, mr);
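
For completeness (not part of this patch): IO_REGION_F_VMAP is only set on the vmap() path, which is presumably what the release side keys off when deciding whether a vunmap() is owed. A minimal sketch of such a teardown, assuming io_free_region() mirrors io_region_init_ptr(); the helper name below is made up for illustration:

#include <linux/vmalloc.h>

/*
 * Sketch only: a vmap()'ed region owes a vunmap(), while a
 * single-folio region's ptr came straight from page_address()
 * and needs no unmapping.  Dropping the page references is a
 * separate step of the region teardown.
 */
static void io_region_free_ptr(struct io_mapped_region *mr)
{
	if (mr->flags & IO_REGION_F_VMAP)
		vunmap(mr->ptr);
	mr->ptr = NULL;
}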