author     Pavel Begunkov <asml.silence@gmail.com>      2025-02-05 11:36:45 +0000
committer  Jens Axboe <axboe@kernel.dk>                 2025-02-17 05:34:45 -0700
commit     dc39fb1093ea33019f192c93b77b863282e10162
tree       88c99289c9780a6e90578148709ba62edb29d076     /io_uring/kbuf.c
parent     dd4fbb11e7ccc15dbb197a5bbfb2ca8bfda89fcd
io_uring/kbuf: simplify __io_put_kbuf
As a preparation step, remove the optimisation from __io_put_kbuf() that tries
to use the locked cache. With that, __io_put_kbuf_list() is only used with
->io_buffers_comp, so remove the explicit list argument.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1b7f1394ec4afc7f96b35a61f5992e27c49fd067.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
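[Editor's note] The list-argument removal the message refers to lands in io_uring/kbuf.h, which is outside the kbuf.c diffstat below. As a rough, hedged sketch only (not the actual header change from this series), the simplified helper could look roughly like this, assuming the legacy buffer always goes back to ->io_buffers_comp under ->completion_lock:

/*
 * Illustrative sketch, not the real io_uring/kbuf.h definition: with the
 * locked-cache path gone, the helper no longer needs a list argument and
 * can hand the legacy buffer straight back to ctx->io_buffers_comp.
 * The caller (__io_put_kbuf() in the diff below) holds ctx->completion_lock.
 */
static inline void __io_put_kbuf_list(struct io_kiocb *req, int len)
{
	struct io_ring_ctx *ctx = req->ctx;

	lockdep_assert_held(&ctx->completion_lock);

	/* remember which buffer group the buffer came from, then return it */
	req->buf_index = req->kbuf->bgid;
	list_add(&req->kbuf->list, &ctx->io_buffers_comp);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	/* 'len' (bytes consumed) kept to match the kbuf.c caller; accounting omitted */
}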
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--    io_uring/kbuf.c    26
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 2e1561c9220f..3a43af9f7061 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -70,29 +70,9 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 
 void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
 {
-	/*
-	 * We can add this buffer back to two lists:
-	 *
-	 * 1) The io_buffers_cache list. This one is protected by the
-	 *    ctx->uring_lock. If we already hold this lock, add back to this
-	 *    list as we can grab it from issue as well.
-	 * 2) The io_buffers_comp list. This one is protected by the
-	 *    ctx->completion_lock.
-	 *
-	 * We migrate buffers from the comp_list to the issue cache list
-	 * when we need one.
-	 */
-	if (issue_flags & IO_URING_F_UNLOCKED) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		spin_lock(&ctx->completion_lock);
-		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
-		spin_unlock(&ctx->completion_lock);
-	} else {
-		lockdep_assert_held(&req->ctx->uring_lock);
-
-		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
-	}
+	spin_lock(&req->ctx->completion_lock);
+	__io_put_kbuf_list(req, len);
+	spin_unlock(&req->ctx->completion_lock);
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,