author     Pavel Begunkov <asml.silence@gmail.com>    2025-02-05 11:36:43 +0000
committer  Jens Axboe <axboe@kernel.dk>               2025-02-17 05:34:45 -0700
commit     9afe6847cff78e7f3aa8f4c920265cf298033251 (patch)
tree       88fba2668a677f324767c17c857576068916ca02
parent     7919292a961421bfdb22f83c16657684c96076b3 (diff)
io_uring/kbuf: remove legacy kbuf kmem cache
Remove the kmem cache used by legacy provided buffers.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8195c207d8524d94e972c0c82de99282289f7f5c.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  io_uring/io_uring.c  2
-rw-r--r--  io_uring/io_uring.h  1
-rw-r--r--  io_uring/kbuf.c      6
3 files changed, 2 insertions, 7 deletions
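
A minimal sketch of the pattern this commit applies, not the actual io_uring code: a dedicated SLAB_ACCOUNT kmem cache is dropped in favour of plain kmalloc()/kfree() with GFP_KERNEL_ACCOUNT, which keeps the memcg accounting the cache flag provided. The struct and function names below are hypothetical stand-ins for struct io_buffer and its users.

/*
 * Sketch only; "example_buf" stands in for struct io_buffer.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

struct example_buf {
	struct list_head list;
	__u64 addr;
	__u32 len;
};

/* Before: objects come from a dedicated, memcg-accounted slab cache. */
static struct kmem_cache *example_cachep;

static int legacy_init(void)
{
	example_cachep = KMEM_CACHE(example_buf,
				    SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
	return example_cachep ? 0 : -ENOMEM;
}

static struct example_buf *legacy_alloc(void)
{
	return kmem_cache_alloc(example_cachep, GFP_KERNEL);
}

static void legacy_free(struct example_buf *buf)
{
	kmem_cache_free(example_cachep, buf);
}

/*
 * After: plain kmalloc()/kfree(); GFP_KERNEL_ACCOUNT preserves the
 * memcg accounting that SLAB_ACCOUNT gave the dedicated cache, and the
 * cache itself (plus its init-time setup) can be removed entirely.
 */
static struct example_buf *plain_alloc(void)
{
	return kmalloc(sizeof(struct example_buf), GFP_KERNEL_ACCOUNT);
}

static void plain_free(struct example_buf *buf)
{
	kfree(buf);
}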
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index e34a92c73a5d..6fa1e88e40fb 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3927,8 +3927,6 @@ static int __init io_uring_init(void)
req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
- io_buf_cachep = KMEM_CACHE(io_buffer,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
BUG_ON(!iou_wq);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ab619e63ef39..85bc8f76ca19 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -418,7 +418,6 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
}
extern struct kmem_cache *req_cachep;
-extern struct kmem_cache *io_buf_cachep;
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index f152afdf0bc7..2e1561c9220f 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -20,8 +20,6 @@
/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
-struct kmem_cache *io_buf_cachep;
-
struct io_provide_buf {
struct file *file;
__u64 addr;
@@ -411,7 +409,7 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
buf = list_entry(item, struct io_buffer, list);
- kmem_cache_free(io_buf_cachep, buf);
+ kfree(buf);
}
}
@@ -521,7 +519,7 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
spin_unlock(&ctx->completion_lock);
}
- buf = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
if (!buf)
return -ENOMEM;
list_add_tail(&buf->list, &ctx->io_buffers_cache);