author      Linus Torvalds <torvalds@linux-foundation.org>   2025-04-25 11:31:47 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2025-04-25 11:31:47 -0700
commit      0537fbb6ecae857ee862e88a6ead1ff2f918b67f (patch)
tree        6a2011ca7c7a2952260a4e67a711b46eeb543d89
parent      6e3597f12dce7d5041e604fec3602493e38c330a (diff)
parent      edd43f4d6f50ec3de55a0c9e9df6348d1da51965 (diff)
Merge tag 'io_uring-6.15-20250424' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
- Fix an older bug in the handling of fallback task_work when the task is
  exiting. Found by code inspection while reworking cancellation; see the
  second sketch after the diff.
- Fix duplicate flushing in one of the CQE posting helpers; see the first
  sketch after the diff.
* tag 'io_uring-6.15-20250424' of git://git.kernel.dk/linux:
io_uring: fix 'sync' handling of io_fallback_tw()
io_uring: don't duplicate flushing in io_req_post_cqe
-rw-r--r--      io_uring/io_uring.c     24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c6209fe44cb1..a2b256e96d5d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -872,10 +872,15 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
-	__io_cq_lock(ctx);
-	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	if (!ctx->lockless_cq) {
+		spin_lock(&ctx->completion_lock);
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	}
+
 	ctx->submit_state.cq_flush = true;
-	__io_cq_unlock_post(ctx);
 	return posted;
 }
 
@@ -1078,21 +1083,22 @@ static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
-		if (sync && last_ctx != req->ctx) {
+		if (last_ctx != req->ctx) {
 			if (last_ctx) {
-				flush_delayed_work(&last_ctx->fallback_work);
+				if (sync)
+					flush_delayed_work(&last_ctx->fallback_work);
 				percpu_ref_put(&last_ctx->refs);
 			}
 			last_ctx = req->ctx;
 			percpu_ref_get(&last_ctx->refs);
 		}
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
+		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+			schedule_delayed_work(&last_ctx->fallback_work, 1);
 	}
 
 	if (last_ctx) {
-		flush_delayed_work(&last_ctx->fallback_work);
+		if (sync)
+			flush_delayed_work(&last_ctx->fallback_work);
 		percpu_ref_put(&last_ctx->refs);
 	}
 }
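
Two notes on the changes above, each with a small user-space sketch in plain C. First, the io_req_post_cqe() hunk: the helper used to wrap the CQE fill in __io_cq_lock()/__io_cq_unlock_post(), and the unlock-post side also flushes the CQ, duplicating work that the cq_flush flag already defers to the end of the batch. After the change, the completion lock is taken only when the ring is not in lockless-CQ mode, and all flushing is left to the deferred path. Below is a minimal sketch of that locking shape using pthreads rather than kernel primitives; struct ring, fill_cqe() and post_cqe() are invented names for illustration, not io_uring's API.

#include <pthread.h>
#include <stdbool.h>

struct ring {
        pthread_mutex_t completion_lock;
        bool lockless_cq;       /* ring variant that needs no CQ lock */
        bool cq_flush;          /* flush deferred to end of batch */
};

/* Stub standing in for io_fill_cqe_aux(): post one CQE. */
static bool fill_cqe(struct ring *r, unsigned long long user_data,
                     int res, unsigned int cflags)
{
        (void)r; (void)user_data; (void)res; (void)cflags;
        return true;
}

static bool post_cqe(struct ring *r, unsigned long long user_data,
                     int res, unsigned int cflags)
{
        bool posted;

        /* Take the completion lock only when this ring needs it. */
        if (!r->lockless_cq) {
                pthread_mutex_lock(&r->completion_lock);
                posted = fill_cqe(r, user_data, res, cflags);
                pthread_mutex_unlock(&r->completion_lock);
        } else {
                posted = fill_cqe(r, user_data, res, cflags);
        }

        /* Record that a flush is pending instead of flushing here,
         * so the batch-end flush runs exactly once. */
        r->cq_flush = true;
        return posted;
}

int main(void)
{
        struct ring r = {
                .completion_lock = PTHREAD_MUTEX_INITIALIZER,
                .lockless_cq = false,
                .cq_flush = false,
        };
        bool ok = post_cqe(&r, 0x1234, 0, 0);

        return (ok && r.cq_flush) ? 0 : 1;
}

The design point is that the lockless fast path never touches the mutex, and because post_cqe() only records that a flush is needed, waking completion waiters happens once per batch rather than once per posted CQE.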
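
Second, the __io_fallback_tw() hunk: before the fix, the per-context bookkeeping (reference get/put and the flush) ran only for sync callers, while requests were queued against req->ctx directly. The rewritten loop tracks last_ctx unconditionally, so the list add and delayed-work scheduling always run against a context that holds a reference, and only the blocking flush_delayed_work() stays gated on sync (the task-exit case). A compilable sketch of that loop shape follows; ctx_get()/ctx_put()/queue_fallback() are stand-ins for percpu_ref_get()/percpu_ref_put() and the llist_add()+schedule_delayed_work() pair, not real kernel calls.

#include <stdbool.h>
#include <stddef.h>

struct ctx {
        int refs;               /* stand-in for a percpu_ref */
};

struct request {
        struct ctx *ctx;
        struct request *next;
};

static void flush_fallback_work(struct ctx *c) { (void)c; }
static void ctx_get(struct ctx *c) { c->refs++; }
static void ctx_put(struct ctx *c) { c->refs--; }
static void queue_fallback(struct request *req) { (void)req; }

static void fallback_tw(struct request *node, bool sync)
{
        struct ctx *last_ctx = NULL;

        while (node) {
                struct request *req = node;

                node = node->next;
                /*
                 * Pin every context we touch, sync or not; only the
                 * blocking flush of the previous context depends on
                 * the caller wanting synchronous behaviour.
                 */
                if (last_ctx != req->ctx) {
                        if (last_ctx) {
                                if (sync)
                                        flush_fallback_work(last_ctx);
                                ctx_put(last_ctx);
                        }
                        last_ctx = req->ctx;
                        ctx_get(last_ctx);
                }
                queue_fallback(req);    /* always against a pinned ctx */
        }

        if (last_ctx) {
                if (sync)
                        flush_fallback_work(last_ctx);
                ctx_put(last_ctx);
        }
}

int main(void)
{
        struct ctx a = { 0 }, b = { 0 };
        struct request r2 = { &b, NULL };
        struct request r1 = { &a, &r2 };

        fallback_tw(&r1, true);         /* sync caller, e.g. task exit */
        fallback_tw(NULL, false);       /* empty list is a no-op */
        return (a.refs == 0 && b.refs == 0) ? 0 : 1;
}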