path: root/io_uring/rsrc.c
author     Pavel Begunkov <asml.silence@gmail.com>   2024-12-08 21:46:01 +0000
committer  Jens Axboe <axboe@kernel.dk>              2024-12-23 08:20:31 -0700
commit     de3b9e2e48190be28473ea84c384bc64931facf7 (patch)
tree       17e58dadd7e6ecabb0075229d3d8142fbdeab4aa /io_uring/rsrc.c
parent     2e6406a20a3999cc761a5a697a9afa7de40713e6 (diff)
io_uring: don't vmap single page regions
When io_check_coalesce_buffer() meets a single page buffer it bails out
and tells that it can't be coalesced. That's fine for registered buffers
as io_coalesce_buffer() wouldn't change anything, but the region code now
uses the function to decide whether to vmap the buffer or not.

Report that a single page buffer is trivially coalescable and let
io_sqe_buffer_register() filter them.

Fixes: c4d0ac1c1567 ("io_uring/memmap: optimise single folio regions")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/cb83e053f318857068447d40c95becebcd8aeced.1733689833.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
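For context, the region code referenced above (io_uring/memmap.c, after
c4d0ac1c1567) uses io_check_coalesce_buffer() roughly as in the sketch
below to pick between page_address() and vmap(). This is a simplified
illustration of the logic described in the message, not the verbatim
kernel source; the helper name io_region_init_ptr_sketch, the
io_mapped_region field names and the IO_REGION_F_VMAP flag usage are
assumptions here.

/*
 * Simplified sketch of the vmap decision in the region code; helper and
 * field names are illustrative, not the exact io_uring/memmap.c source.
 */
static int io_region_init_ptr_sketch(struct io_mapped_region *mr)
{
	struct io_imu_folio_data ifd;

	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd) &&
	    ifd.nr_folios == 1) {
		/*
		 * Everything sits in one folio -- trivially true for a single
		 * page once this patch makes the check return true -- so the
		 * memory is physically contiguous and page_address() of the
		 * first page is enough, no vmap needed.
		 */
		mr->ptr = page_address(mr->pages[0]);
		return 0;
	}

	/* Scattered pages: build a contiguous kernel virtual mapping. */
	mr->ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!mr->ptr)
		return -ENOMEM;
	mr->flags |= IO_REGION_F_VMAP;
	return 0;
}

Before this patch a single page buffer made io_check_coalesce_buffer()
return false, pushing such regions down the vmap() path; with the fix it
reports the buffer as trivially coalescable, so the single-folio fast
path applies again.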
Diffstat (limited to 'io_uring/rsrc.c')
-rw-r--r--  io_uring/rsrc.c  13
1 file changed, 5 insertions, 8 deletions
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 2d2333970eb5..f2ff108485c8 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -675,14 +675,9 @@ bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
 	unsigned int count = 1, nr_folios = 1;
 	int i;
 
-	if (nr_pages <= 1)
-		return false;
-
 	data->nr_pages_mid = folio_nr_pages(folio);
-	if (data->nr_pages_mid == 1)
-		return false;
-
 	data->folio_shift = folio_shift(folio);
+
 	/*
 	 * Check if pages are contiguous inside a folio, and all folios have
 	 * the same page count except for the head and tail.
@@ -750,8 +745,10 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 	}
 
 	/* If it's huge page(s), try to coalesce them into fewer bvec entries */
-	if (io_check_coalesce_buffer(pages, nr_pages, &data))
-		coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
+	if (nr_pages > 1 && io_check_coalesce_buffer(pages, nr_pages, &data)) {
+		if (data.nr_pages_mid != 1)
+			coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
+	}
 
 	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
 	if (!imu)