author	Pavel Begunkov <asml.silence@gmail.com>	2025-02-05 11:36:42 +0000
committer	Jens Axboe <axboe@kernel.dk>	2025-02-17 05:34:45 -0700
commit	7919292a961421bfdb22f83c16657684c96076b3 (patch)
tree	d7aa8edf8b564f426799f9f276c6b9d883c9c993 /io_uring/kbuf.c
parent	92a3bac9a57c39728226ab191859c85f5e2829c0 (diff)
io_uring/kbuf: remove legacy kbuf bulk allocation
Legacy provided buffers are slow and discouraged in favour of the ring
variant. Remove the bulk allocation to keep the code simpler, as we
don't care about performance for this path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a064d70370e590efed8076e9501ae4cfc20fe0ca.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
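For readers who don't follow io_uring closely, the commit message is
contrasting two userspace-facing buffer registration styles. Below is a
minimal liburing sketch of both, illustrative only and not part of the
patch; BGID, NR_BUFS and BUF_SIZE are arbitrary values chosen for the
example.

/*
 * Illustrative sketch (not part of this patch): the two provided-buffer
 * styles the commit message contrasts, using liburing.
 */
#include <liburing.h>

#define BGID     0
#define NR_BUFS  64
#define BUF_SIZE 4096

/* Legacy provided buffers: one SQE per refill, serviced by the kernel
 * code in this file (io_provide_buffers_prep() and friends). */
static int provide_legacy(struct io_uring *ring, void *base)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_provide_buffers(sqe, base, BUF_SIZE, NR_BUFS, BGID, 0);
	return io_uring_submit(ring);
}

/* Ring-mapped provided buffers: the favoured variant; refills are plain
 * writes into a shared ring, with no SQE submission required. */
static struct io_uring_buf_ring *provide_ring(struct io_uring *ring, void *base)
{
	struct io_uring_buf_ring *br;
	int i, err;

	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
	if (!br)
		return NULL;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, (char *)base + i * BUF_SIZE,
				      BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);
	return br;
}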
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--	io_uring/kbuf.c	30
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 8e72de7712ac..f152afdf0bc7 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -501,12 +501,9 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	return 0;
 }
 
-#define IO_BUFFER_ALLOC_BATCH 64
-
 static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
 {
-	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
-	int allocated;
+	struct io_buffer *buf;
 
 	/*
 	 * Completions that don't happen inline (eg not under uring_lock) will
@@ -524,27 +521,10 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
 		spin_unlock(&ctx->completion_lock);
 	}
 
-	/*
-	 * No free buffers and no completion entries either. Allocate a new
-	 * batch of buffer entries and add those to our freelist.
-	 */
-
-	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
-					  ARRAY_SIZE(bufs), (void **) bufs);
-	if (unlikely(!allocated)) {
-		/*
-		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
-		 * retry single alloc to be on the safe side.
-		 */
-		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
-		if (!bufs[0])
-			return -ENOMEM;
-		allocated = 1;
-	}
-
-	while (allocated)
-		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
-
+	buf = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	list_add_tail(&buf->list, &ctx->io_buffers_cache);
 	return 0;
 }
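With both hunks applied, the refill helper reduces to roughly the
following (reconstructed from the diff above; the unchanged recycling
of completed buffers in the middle of the function is elided):

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;

	/* ... first try to recycle buffers returned by completions,
	 * taking ctx->completion_lock (unchanged context elided) ... */

	buf = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	list_add_tail(&buf->list, &ctx->io_buffers_cache);
	return 0;
}

One allocation per call is enough here: as the removed comment notes,
this path only runs when the free cache and the completion list are
both empty, and legacy provided buffers are off the fast path anyway,
which is exactly the trade the commit message describes.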