author    | Pavel Begunkov <asml.silence@gmail.com> | 2024-11-29 13:34:37 +0000
committer | Jens Axboe <axboe@kernel.dk> | 2024-12-23 08:17:16 -0700
commit    | 90175f3f503213903b00bc7ba9f8ae436fc5c00e (patch)
tree      | 6db652fa22044fae06ce09cd98c98cbc2c30e288 /io_uring/kbuf.c
parent    | 78fda3d056417ccb9921663383b12f771aa0dd43 (diff)
io_uring/kbuf: remove pbuf ring refcounting
struct io_buffer_list refcounting was needed for RCU-based sync with
mmap; now we can kill it.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4a9cc54bf0077bb2bf2f3daf917549ddd41080da.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
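
The commit message is terse, so here is a minimal userspace sketch of the two lifetime schemes involved. This is not io_uring code: buf_list, bl_tryget, bl_put, table_lock and slot are made-up names, and pthread/C11 atomics stand in for the kernel's mutex and atomic_t. The old scheme let an unlocked lookup take a reference via an inc-not-zero, with the final put freeing the object; the new scheme serializes lookup and teardown under one lock, so the object can be freed directly and the per-object refcount goes away.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct io_buffer_list: just an ID plus the refcount
 * that only the old scheme needs. */
struct buf_list {
	int bgid;
	atomic_int refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf_list *slot;	/* stand-in for the xarray entry */

/* Old scheme: a lookup may race with teardown, so it must take a
 * reference, and only succeeds if the object is not already dying. */
static bool bl_tryget(struct buf_list *bl)
{
	int old = atomic_load(&bl->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&bl->refs, &old, old + 1))
			return true;	/* got a reference */
	}
	return false;			/* already being torn down */
}

static void bl_put(struct buf_list *bl)
{
	if (atomic_fetch_sub(&bl->refs, 1) == 1)
		free(bl);		/* last reference dropped */
}

/* New scheme: lookup and teardown both run under table_lock, so the
 * entry cannot vanish while a lookup holds the lock and no per-object
 * refcount is needed. */
static struct buf_list *bl_lookup_locked(void)
{
	return slot;			/* caller holds table_lock */
}

static void bl_destroy_locked(void)
{
	pthread_mutex_lock(&table_lock);
	free(slot);
	slot = NULL;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	slot = calloc(1, sizeof(*slot));
	slot->bgid = 7;
	atomic_init(&slot->refs, 1);

	/* old style: an unlocked lookup takes and drops a reference */
	if (bl_tryget(slot)) {
		printf("old style: looked up bgid %d\n", slot->bgid);
		bl_put(slot);
	}

	/* new style: the lookup stays valid while the lock is held */
	pthread_mutex_lock(&table_lock);
	printf("new style: looked up bgid %d\n", bl_lookup_locked()->bgid);
	pthread_mutex_unlock(&table_lock);

	bl_destroy_locked();
	return 0;
}

Build with something like cc -std=c11 -pthread. The point is only that once every reader holds the same lock as the destroyer, the inc-not-zero/dec-and-test dance becomes dead weight, which is exactly what the diff below removes from io_pbuf_get_bl() and io_put_bl().
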
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r-- | io_uring/kbuf.c | 21
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 662e928cc3b0..644f61445ec9 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -48,7 +48,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 * always under the ->uring_lock, but lookups from mmap do.
 	 */
 	bl->bgid = bgid;
-	atomic_set(&bl->refs, 1);
 	guard(mutex)(&ctx->mmap_lock);
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
@@ -385,12 +384,10 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
 	return i;
 }
 
-void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
 {
-	if (atomic_dec_and_test(&bl->refs)) {
-		__io_remove_buffers(ctx, bl, -1U);
-		kfree(bl);
-	}
+	__io_remove_buffers(ctx, bl, -1U);
+	kfree(bl);
 }
 
 void io_destroy_buffers(struct io_ring_ctx *ctx)
@@ -804,10 +801,8 @@ struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
 
 	bl = xa_load(&ctx->io_bl_xa, bgid);
 	/* must be a mmap'able buffer ring and have pages */
-	if (bl && bl->flags & IOBL_MMAP) {
-		if (atomic_inc_not_zero(&bl->refs))
-			return bl;
-	}
+	if (bl && bl->flags & IOBL_MMAP)
+		return bl;
 
 	return ERR_PTR(-EINVAL);
 }
@@ -817,7 +812,7 @@ int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
 	struct io_ring_ctx *ctx = file->private_data;
 	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
 	struct io_buffer_list *bl;
-	int bgid, ret;
+	int bgid;
 
 	lockdep_assert_held(&ctx->mmap_lock);
 
@@ -826,7 +821,5 @@ int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (IS_ERR(bl))
 		return PTR_ERR(bl);
 
-	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
-	io_put_bl(ctx, bl);
-	return ret;
+	return io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
 }