author		Jens Axboe <axboe@kernel.dk>	2025-11-03 11:21:39 -0700
committer	Jens Axboe <axboe@kernel.dk>	2025-11-04 09:32:08 -0700
commit		0d677936d67774f1b4ebfb3b26f207320f0fe3c6 (patch)
tree		e320e557cb5c79183ef29b190482246b97bc87da
parent		bc82b02218204d89f26fd1fde5aed265f40453d3 (diff)
io_uring/cancel: move request/task cancelation logic into cancel.c
Move io_match_task_safe() and helpers into cancel.c, where it belongs.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
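For context, io_match_task_safe() is the variant of the plain task-match check that is safe against linked-timeout races. A minimal sketch of the unsafe counterpart it is named after, reconstructed from the moved code for illustration, might look like this; it is not part of this patch:

static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
			  bool cancel_all)
{
	/* request belongs to a different task: never a match */
	if (tctx && head->tctx != tctx)
		return false;
	/* cancel_all matches every request owned by the task */
	if (cancel_all)
		return true;
	/* otherwise match only if the link chain has an in-flight request */
	return io_match_linked(head);
}

Unlike this sketch, the safe variant moved below takes ctx->timeout_lock before walking the link chain when a linked timeout is armed.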
-rw-r--r--	io_uring/cancel.c	38
-rw-r--r--	io_uring/cancel.h	2
-rw-r--r--	io_uring/io_uring.c	38
-rw-r--r--	io_uring/io_uring.h	3
4 files changed, 40 insertions(+), 41 deletions(-)
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 64b51e82baa2..2754ea80e288 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -384,3 +384,41 @@ int io_cancel_remove(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
io_ring_submit_unlock(ctx, issue_flags);
return nr ?: -ENOENT;
}
+
+static bool io_match_linked(struct io_kiocb *head)
+{
+ struct io_kiocb *req;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
+ bool cancel_all)
+{
+ bool matched;
+
+ if (tctx && head->tctx != tctx)
+ return false;
+ if (cancel_all)
+ return true;
+
+ if (head->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = head->ctx;
+
+ /* protect against races with linked timeouts */
+ raw_spin_lock_irq(&ctx->timeout_lock);
+ matched = io_match_linked(head);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ matched = io_match_linked(head);
+ }
+ return matched;
+}
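As the comment above notes, the lock matters because a linked timeout firing can modify head's link chain concurrently; holding ctx->timeout_lock keeps the io_for_each_link() walk stable. A hedged sketch of how a cancelation pass might call the relocated helper follows; the list and node names are hypothetical, while io_match_task_safe() and io_req_task_queue_fail() are real io_uring helpers:

static void cancel_task_requests(struct io_ring_ctx *ctx,
				 struct io_uring_task *tctx, bool cancel_all)
{
	struct io_kiocb *req, *tmp;

	/*
	 * Hypothetical walk over some per-ctx request list. The caller
	 * must not hold ctx->timeout_lock: io_match_task_safe() takes
	 * it internally when the head has a linked timeout armed.
	 */
	list_for_each_entry_safe(req, tmp, &ctx->example_list, example_node) {
		if (!io_match_task_safe(req, tctx, cancel_all))
			continue;
		/* fail the matched request from task context */
		io_req_task_queue_fail(req, -ECANCELED);
	}
}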
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 43e9bb74e9d1..6d5208e9d7a6 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -23,6 +23,8 @@ int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
+ bool cancel_all);
bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
struct hlist_head *list, bool cancel_all,
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 01631b6ff442..75bd049a1efd 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -207,44 +207,6 @@ static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}
-static bool io_match_linked(struct io_kiocb *head)
-{
- struct io_kiocb *req;
-
- io_for_each_link(req, head) {
- if (req->flags & REQ_F_INFLIGHT)
- return true;
- }
- return false;
-}
-
-/*
- * As io_match_task() but protected against racing with linked timeouts.
- * User must not hold timeout_lock.
- */
-bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
- bool cancel_all)
-{
- bool matched;
-
- if (tctx && head->tctx != tctx)
- return false;
- if (cancel_all)
- return true;
-
- if (head->flags & REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = head->ctx;
-
- /* protect against races with linked timeouts */
- raw_spin_lock_irq(&ctx->timeout_lock);
- matched = io_match_linked(head);
- raw_spin_unlock_irq(&ctx->timeout_lock);
- } else {
- matched = io_match_linked(head);
- }
- return matched;
-}
-
static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
req_set_fail(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index f97356ce29d0..2f4d43e69648 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -174,9 +174,6 @@ void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
-bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
- bool cancel_all);
-
void io_activate_pollwq(struct io_ring_ctx *ctx);
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
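After this move, code that needs io_match_task_safe() gets the declaration from cancel.h rather than io_uring.h; a minimal sketch of the include in a hypothetical caller:

/* illustrative only: pick up the relocated declaration */
#include "cancel.h"

Nothing else changes for callers: the function's signature and behavior are identical, only its home moved.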