path: root/io_uring/uring_cmd.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2025-03-28 15:07:04 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-28 15:07:04 -0700
commit		eff5f16bfd87ae48c56751741af41a825d5d4618 (patch)
tree		9b1e58d1038902a754107b35621d428ba24f5165 /io_uring/uring_cmd.c
parent		6df9d086ffcb6b0521872fef5f9f4dd1907abb9a (diff)
parent		6889ae1b4df1579bcdffef023e2ea9a982565dff (diff)
Merge tag 'for-6.15/io_uring-reg-vec-20250327' of git://git.kernel.dk/linux
Pull more io_uring updates from Jens Axboe:
 "Final separate updates for io_uring. This started out as a series of
  cleanups and improvements for registered buffers, but as the last
  series of the io_uring changes for 6.15, it also collected a few
  fixes for the other branches on top:

   - Add support for vectored fixed/registered buffers. Previously
     only single segments have been supported for commands, now
     vectored variants are supported as well. This series includes
     networking and file read/write support.

   - Small series unifying return codes across multi and single shot.

   - Small series cleaning up registered buffer importing.

   - Adding support for vectored registered buffers for uring_cmd.

   - Fix for io-wq handling of command reissue.

   - Various little fixes and tweaks"

* tag 'for-6.15/io_uring-reg-vec-20250327' of git://git.kernel.dk/linux: (25 commits)
  io_uring/net: fix io_req_post_cqe abuse by send bundle
  io_uring/net: use REQ_F_IMPORT_BUFFER for send_zc
  io_uring: move min_events sanitisation
  io_uring: rename "min" arg in io_iopoll_check()
  io_uring: open code __io_post_aux_cqe()
  io_uring: defer iowq cqe overflow via task_work
  io_uring: fix retry handling off iowq
  io_uring/net: only import send_zc buffer once
  io_uring/cmd: introduce io_uring_cmd_import_fixed_vec
  io_uring/cmd: add iovec cache for commands
  io_uring/cmd: don't expose entire cmd async data
  io_uring: rename the data cmd cache
  io_uring: rely on io_prep_reg_vec for iovec placement
  io_uring: introduce io_prep_reg_iovec()
  io_uring: unify STOP_MULTISHOT with IOU_OK
  io_uring: return -EAGAIN to continue multishot
  io_uring: cap cached iovec/bvec size
  io_uring/net: implement vectored reg bufs for zctx
  io_uring/net: convert to struct iou_vec
  io_uring/net: pull vec alloc out of msghdr import
  ...
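The headline feature is vectored I/O against registered buffers. A minimal
userspace sketch of the read side, assuming a liburing setup and the 6.15
IORING_OP_READV_FIXED opcode (the helper name and error handling here are
illustrative, not taken from this pull):

	/* Sketch: vectored read into a registered buffer. Every iovec
	 * segment must fall inside the registered buffer selected by
	 * sqe->buf_index; the SQE layout otherwise matches IORING_OP_READV. */
	#include <sys/uio.h>
	#include <liburing.h>

	static int queue_readv_fixed(struct io_uring *ring, int fd,
				     struct iovec *vecs, unsigned nr_vecs,
				     unsigned short buf_index)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			return -EBUSY;
		io_uring_prep_readv(sqe, fd, vecs, nr_vecs, 0);
		sqe->opcode = IORING_OP_READV_FIXED;	/* assumed uapi name */
		sqe->buf_index = buf_index;		/* registered buffer slot */
		return io_uring_submit(ring);
	}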
Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--	io_uring/uring_cmd.c	59
1 file changed, 50 insertions(+), 9 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index de39b602aa82..f2cfc371f3d0 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,10 +16,19 @@
 #include "rsrc.h"
 #include "uring_cmd.h"
 
+void io_cmd_cache_free(const void *entry)
+{
+	struct io_async_cmd *ac = (struct io_async_cmd *)entry;
+
+	io_vec_free(&ac->vec);
+	kfree(ac);
+}
+
 static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	struct io_uring_cmd_data *cache = req->async_data;
+	struct io_async_cmd *ac = req->async_data;
+	struct io_uring_cmd_data *cache = &ac->data;
 
 	if (cache->op_data) {
 		kfree(cache->op_data);
@@ -28,13 +37,23 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (issue_flags & IO_URING_F_UNLOCKED)
 		return;
-	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
+
+	io_alloc_cache_vec_kasan(&ac->vec);
+	if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+		io_vec_free(&ac->vec);
+
+	if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
 		ioucmd->sqe = NULL;
 		req->async_data = NULL;
-		req->flags &= ~REQ_F_ASYNC_DATA;
+		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
 	}
 }
 
+void io_uring_cmd_cleanup(struct io_kiocb *req)
+{
+	io_req_uring_cleanup(req, 0);
+}
+
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct io_uring_task *tctx, bool cancel_all)
 {
@@ -169,12 +188,15 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	struct io_uring_cmd_data *cache;
+	struct io_async_cmd *ac;
+
+	/* see io_uring_cmd_get_async_data() */
+	BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);
 
-	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
-	if (!cache)
+	ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+	if (!ac)
 		return -ENOMEM;
-	cache->op_data = NULL;
+	ac->data.op_data = NULL;
 
 	/*
 	 * Unconditionally cache the SQE for now - this is only needed for
@@ -183,8 +205,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 	 * that it doesn't read in per-op data, play it safe and ensure that
 	 * any SQE data is stable beyond prep. This can later get relaxed.
 	 */
-	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = cache->sqes;
+	memcpy(ac->data.sqes, sqe, uring_sqe_size(req->ctx));
+	ioucmd->sqe = ac->data.sqes;
 	return 0;
 }
@@ -255,6 +277,25 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
 
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+				  const struct iovec __user *uvec,
+				  size_t uvec_segs,
+				  int ddir, struct iov_iter *iter,
+				  unsigned issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+	struct io_async_cmd *ac = req->async_data;
+	int ret;
+
+	ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
+	if (ret)
+		return ret;
+
+	return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
+				 issue_flags);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);
+
 void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
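
For driver authors, the exported helper added above is the entry point. A
hedged sketch of how a hypothetical ->uring_cmd() handler might wire it up
(the my_cmd layout, field names, and handler are illustrative, not part of
this patch; only io_uring_cmd_import_fixed_vec() and io_uring_sqe_cmd() come
from the kernel API):

	#include <linux/io_uring/cmd.h>
	#include <linux/uio.h>

	/* Illustrative command ABI; a real driver defines its own. */
	struct my_cmd {
		__u64 uvec;	/* user pointer to a struct iovec array */
		__u32 nr_segs;
		__u32 __pad;
	};

	static int my_drv_uring_cmd(struct io_uring_cmd *ioucmd,
				    unsigned int issue_flags)
	{
		const struct my_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
		struct iov_iter iter;
		int ret;

		/* Resolve the user iovec against the request's registered
		 * buffer; the segments are cached in the per-command
		 * io_async_cmd vec and freed via io_cmd_cache_free(). */
		ret = io_uring_cmd_import_fixed_vec(ioucmd,
						    u64_to_user_ptr(cmd->uvec),
						    cmd->nr_segs, ITER_DEST,
						    &iter, issue_flags);
		if (ret)
			return ret;

		/* ... drive DMA or copy_to_iter() using "iter" ... */
		return -EIOCBQUEUED;
	}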