 io_uring/poll.c | 43 ++++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 3f1d716dcfab..aac4b3b881fb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -138,14 +138,32 @@ static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+ /*
+ * If the waitqueue is being freed early but someone already holds
+ * ownership over it, we have to tear down the request as best we can.
+ * That means immediately removing the request from its waitqueue and
+ * preventing all further accesses to the waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon as poll->head is
+ * NULL'ed out, the request can be completed and freed, since
+ * io_poll_remove_entry() will no longer need to take the waitqueue
+ * lock.
+ */
+ smp_store_release(&poll->head, NULL);
+}
+
static inline void io_poll_remove_entry(struct io_poll *poll)
{
struct wait_queue_head *head = smp_load_acquire(&poll->head);
if (head) {
spin_lock_irq(&head->lock);
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
spin_unlock_irq(&head->lock);
}
}
@@ -368,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
io_poll_mark_cancelled(req);
/* we have to kick tw in case it's not already */
io_poll_execute(req, 0);
-
- /*
- * If the waitqueue is being freed early but someone is already
- * holds ownership over it, we have to tear down the request as
- * best we can. That means immediately removing the request from
- * its waitqueue and preventing all further accesses to the
- * waitqueue via the request.
- */
- list_del_init(&poll->wait.entry);
-
- /*
- * Careful: this *must* be the last step, since as soon
- * as req->head is NULL'ed out, the request can be
- * completed and freed, since aio_poll_complete_work()
- * will no longer need to take the waitqueue lock.
- */
- smp_store_release(&poll->head, NULL);
+ io_poll_remove_waitq(poll);
return 1;
}
@@ -413,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
/* optional, saves extra locking for removal in tw handler */
if (mask && poll->events & EPOLLONESHOT) {
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
if (wqe_is_double(wait))
req->flags &= ~REQ_F_DOUBLE_POLL;
else
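
For reference, the ordering rule that io_poll_remove_waitq() encodes (unlink the entry first, then publish the NULL head with release semantics so it pairs with the smp_load_acquire() in io_poll_remove_entry()) can be modeled outside the kernel. The following is a minimal standalone sketch using C11 atomics in place of smp_store_release()/smp_load_acquire(); the names fake_poll, waker and canceller are illustrative stand-ins, not io_uring code.

/*
 * Standalone sketch (not kernel code): models the release/acquire
 * pairing between io_poll_remove_waitq() and io_poll_remove_entry().
 * fake_poll, waker and canceller are hypothetical names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_poll {
	bool unlinked;            /* stands in for list_del_init() */
	_Atomic(void *) head;     /* stands in for poll->head */
};

/* Waker side: mirrors io_poll_remove_waitq(). */
static void waker(struct fake_poll *p)
{
	p->unlinked = true;       /* tear-down work first ... */
	/*
	 * ... then publish: the release store guarantees the write
	 * above is visible to anyone who observes head == NULL.
	 */
	atomic_store_explicit(&p->head, NULL, memory_order_release);
}

/* Canceller side: mirrors io_poll_remove_entry(). */
static void canceller(struct fake_poll *p)
{
	void *head = atomic_load_explicit(&p->head, memory_order_acquire);

	if (head) {
		/*
		 * head still valid: in the kernel this is where
		 * head->lock is taken and the entry unlinked under it.
		 */
		p->unlinked = true;
		atomic_store_explicit(&p->head, NULL, memory_order_relaxed);
		return;
	}
	/*
	 * head == NULL: the acquire load pairs with the waker's release
	 * store, so p->unlinked is guaranteed to read as true here and
	 * the (possibly already freed) waitqueue is never touched.
	 */
}

int main(void)
{
	int wq;                   /* placeholder waitqueue object */
	struct fake_poll p = { .unlinked = false };

	atomic_init(&p.head, &wq);
	waker(&p);
	canceller(&p);
	printf("unlinked=%d head=%p\n", p.unlinked, atomic_load(&p.head));
	return 0;
}

In the real code the same pattern is what makes the NULL store safe as the final access: a racing io_poll_remove_entry() either still sees a non-NULL head and serializes on head->lock, or sees NULL and backs off entirely.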