Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  249
1 file changed, 145 insertions, 104 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 01d75e5c47aa..078475447264 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -110,11 +110,13 @@
#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
+#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
+
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
REQ_F_ASYNC_DATA)
-#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
+#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \
REQ_F_REISSUE | IO_REQ_CLEAN_FLAGS)
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
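The new IO_REQ_LINK_FLAGS macro simply names the REQ_F_LINK | REQ_F_HARDLINK pair so the later hunks can test both link bits with a single mask. A minimal standalone sketch of that pattern, using illustrative flag values (the real REQ_F_* bits are generated in io_uring_types.h):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; the real REQ_F_* bits come from an enum in
 * io_uring_types.h. */
#define REQ_F_LINK_DEMO		(1U << 6)
#define REQ_F_HARDLINK_DEMO	(1U << 7)
#define REQ_LINK_MASK_DEMO	(REQ_F_LINK_DEMO | REQ_F_HARDLINK_DEMO)

static bool req_is_linked(unsigned int flags)
{
	/* One mask test instead of checking the two bits separately. */
	return flags & REQ_LINK_MASK_DEMO;
}

int main(void)
{
	printf("%d %d\n", req_is_linked(REQ_F_HARDLINK_DEMO), req_is_linked(0));
	return 0;
}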
@@ -131,7 +133,6 @@ struct io_defer_entry {
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
-#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
/*
* No waiters. It's larger than any valid value of the tw counter
@@ -254,7 +255,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
- req->io_task_work.func(req, &ts);
+ req->io_task_work.func(req, ts);
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
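Throughout this patch, task-work callbacks take an io_tw_token_t by value instead of a struct io_tw_state pointer, which is why call sites now pass ts rather than &ts. A hedged, self-contained sketch of that pointer-to-token conversion; the struct layout and names below are stand-ins, not the kernel's definitions (those live in io_uring_types.h):

#include <stdio.h>

/* Stand-in types: assume the token is a thin struct passed by value. */
struct tw_state { int locked; };
typedef struct tw_state tw_token_t;

/* Old shape: callback receives a pointer to the caller's state. */
static void cb_old(int req, struct tw_state *ts)
{
	printf("old: req %d, locked %d\n", req, ts->locked);
}

/* New shape: callback receives the token by value; call sites drop '&'. */
static void cb_new(int req, tw_token_t tw)
{
	printf("new: req %d, locked %d\n", req, tw.locked);
}

int main(void)
{
	struct tw_state ts = { .locked = 1 };

	cb_old(1, &ts);	/* was: req->io_task_work.func(req, &ts); */
	cb_new(1, ts);	/* now: req->io_task_work.func(req, ts);  */
	return 0;
}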
@@ -282,6 +283,17 @@ static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
return 0;
}
+static void io_free_alloc_caches(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_free(&ctx->apoll_cache, kfree);
+ io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ io_alloc_cache_free(&ctx->uring_cache, kfree);
+ io_alloc_cache_free(&ctx->msg_cache, kfree);
+ io_futex_cache_free(ctx);
+ io_rsrc_cache_free(ctx);
+}
+
static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
@@ -313,7 +325,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
init_waitqueue_head(&ctx->sqo_sq_wait);
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
- INIT_LIST_HEAD(&ctx->io_buffers_cache);
ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
sizeof(struct async_poll), 0);
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
@@ -328,6 +339,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_kiocb), 0);
ret |= io_futex_cache_init(ctx);
+ ret |= io_rsrc_cache_init(ctx);
if (ret)
goto free_ref;
init_completion(&ctx->ref_comp);
@@ -338,7 +350,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
spin_lock_init(&ctx->completion_lock);
raw_spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list);
- INIT_LIST_HEAD(&ctx->io_buffers_comp);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
INIT_LIST_HEAD(&ctx->ltimeout_list);
@@ -360,12 +371,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
free_ref:
percpu_ref_exit(&ctx->refs);
err:
- io_alloc_cache_free(&ctx->apoll_cache, kfree);
- io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
- io_alloc_cache_free(&ctx->uring_cache, kfree);
- io_alloc_cache_free(&ctx->msg_cache, kfree);
- io_futex_cache_free(ctx);
+ io_free_alloc_caches(ctx);
kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
@@ -393,11 +399,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- spin_lock(&req->ctx->completion_lock);
- io_kbuf_drop(req);
- spin_unlock(&req->ctx->completion_lock);
- }
+ if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
+ io_kbuf_drop_legacy(req);
if (req->flags & REQ_F_NEED_CLEANUP) {
const struct io_cold_def *def = &io_cold_defs[req->opcode];
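The buffer drop above now goes through io_kbuf_drop_legacy() without taking completion_lock; "legacy" here refers to provided-buffer groups registered with IORING_OP_PROVIDE_BUFFERS rather than mapped buffer rings. For context, a hedged userspace sketch of selecting from such a group (assumes liburing; error handling trimmed):

/* Build with: cc legacy_bufs.c -luring */
#include <liburing.h>
#include <stdlib.h>

#define BGID	1
#define BSIZE	4096
#define NBUFS	4

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	char *bufs = malloc(NBUFS * BSIZE);

	if (!bufs || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register a classic provided-buffer group (the "legacy" scheme). */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_provide_buffers(sqe, bufs, BSIZE, NBUFS, BGID, 0);
	io_uring_submit(&ring);

	/* A read that lets the kernel pick a buffer from group BGID; the
	 * chosen buffer id comes back in the CQE flags. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, 0 /* stdin */, NULL, BSIZE, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(&ring);

	io_uring_queue_exit(&ring);
	free(bufs);
	return 0;
}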
@@ -542,7 +545,7 @@ static void io_queue_iowq(struct io_kiocb *req)
io_queue_linked_timeout(link);
}
-static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
{
io_queue_iowq(req);
}
@@ -899,7 +902,7 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
* Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
* the submitter task context, IOPOLL protects with uring_lock.
*/
- if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
+ if (ctx->lockless_cq) {
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
return;
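The old task_complete || IOPOLL test is folded into the ctx->lockless_cq bit. A simplified sketch, assuming lockless_cq is computed once at ring setup from those same two conditions (the names here are stand-ins for the real fields):

#include <stdbool.h>
#include <stdio.h>

#define SETUP_IOPOLL_DEMO	(1U << 0)	/* stand-in for IORING_SETUP_IOPOLL */

struct ring_ctx_demo {
	unsigned int flags;
	bool task_complete;	/* DEFER_TASKRUN completions */
	bool lockless_cq;	/* CQEs not posted under completion_lock */
};

/* Assumed setup-time computation that the hunk above relies on. */
static void ring_init_cq_mode(struct ring_ctx_demo *ctx)
{
	ctx->lockless_cq = ctx->task_complete ||
			   (ctx->flags & SETUP_IOPOLL_DEMO);
}

int main(void)
{
	struct ring_ctx_demo ctx = { .flags = SETUP_IOPOLL_DEMO };

	ring_init_cq_mode(&ctx);
	printf("lockless_cq=%d\n", ctx.lockless_cq);
	return 0;
}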
@@ -1021,7 +1024,7 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return nxt;
}
-static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+static void ctx_flush_and_put(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
if (!ctx)
return;
@@ -1051,24 +1054,24 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
io_task_work.node);
if (req->ctx != ctx) {
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
ctx = req->ctx;
mutex_lock(&ctx->uring_lock);
percpu_ref_get(&ctx->refs);
}
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
- req, &ts);
+ req, ts);
node = next;
(*count)++;
if (unlikely(need_resched())) {
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
ctx = NULL;
cond_resched();
}
} while (node && *count < max_entries);
- ctx_flush_and_put(ctx, &ts);
+ ctx_flush_and_put(ctx, ts);
return node;
}
@@ -1157,7 +1160,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req,
* We don't know how many requests are there in the link and whether
* they can even be queued lazily, fall back to non-lazy.
*/
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
+ if (req->flags & IO_REQ_LINK_FLAGS)
flags &= ~IOU_F_TWQ_LAZY_WAKE;
guard(rcu)();
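The check described in the comment now uses IO_REQ_LINK_FLAGS: a linked request may fan out into an unknown number of follow-ups, so the lazy-wake batching hint is dropped. For reference, this is how such a link is created from userspace with liburing (assumes liburing; error handling trimmed):

/* Build with: cc link.c -luring */
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* First SQE carries IOSQE_IO_LINK, so the second only starts after
	 * the first completes; on the kernel side these requests carry
	 * REQ_F_LINK/REQ_F_HARDLINK, i.e. IO_REQ_LINK_FLAGS. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->flags |= IOSQE_IO_LINK;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);

	io_uring_submit(&ring);
	io_uring_queue_exit(&ring);
	return 0;
}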
@@ -1276,7 +1279,7 @@ static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
}
static int __io_run_local_work_loop(struct llist_node **node,
- struct io_tw_state *ts,
+ io_tw_token_t tw,
int events)
{
int ret = 0;
@@ -1287,7 +1290,7 @@ static int __io_run_local_work_loop(struct llist_node **node,
io_task_work.node);
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
- req, ts);
+ req, tw);
*node = next;
if (++ret >= events)
break;
@@ -1296,7 +1299,7 @@ static int __io_run_local_work_loop(struct llist_node **node,
return ret;
}
-static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
+static int __io_run_local_work(struct io_ring_ctx *ctx, io_tw_token_t tw,
int min_events, int max_events)
{
struct llist_node *node;
@@ -1309,7 +1312,7 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
min_events -= ret;
- ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, max_events);
+ ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events);
if (ctx->retry_llist.first)
goto retry_done;
@@ -1318,7 +1321,7 @@ again:
* running the pending items.
*/
node = llist_reverse_order(llist_del_all(&ctx->work_llist));
- ret += __io_run_local_work_loop(&node, ts, max_events - ret);
+ ret += __io_run_local_work_loop(&node, tw, max_events - ret);
ctx->retry_llist.first = node;
loops++;
@@ -1340,7 +1343,7 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
if (!io_local_work_pending(ctx))
return 0;
- return __io_run_local_work(ctx, &ts, min_events,
+ return __io_run_local_work(ctx, ts, min_events,
max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
}
@@ -1351,20 +1354,20 @@ static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
int ret;
mutex_lock(&ctx->uring_lock);
- ret = __io_run_local_work(ctx, &ts, min_events, max_events);
+ ret = __io_run_local_work(ctx, ts, min_events, max_events);
mutex_unlock(&ctx->uring_lock);
return ret;
}
-static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
+ io_tw_lock(req->ctx, tw);
io_req_defer_failed(req, req->cqe.res);
}
-void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
+void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
+ io_tw_lock(req->ctx, tw);
if (unlikely(io_should_terminate_tw()))
io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC)
@@ -1419,8 +1422,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
if (apoll->double_poll)
kfree(apoll->double_poll);
- if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
- kfree(apoll);
+ io_cache_free(&ctx->apoll_cache, apoll);
req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)
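io_cache_free() is assumed to wrap exactly the removed open-coded pattern: return the object to the per-ctx cache, or kfree() it when the cache is full. A standalone sketch of that cache-or-free idiom with illustrative names (not the kernel's alloc_cache helpers):

#include <stdbool.h>
#include <stdlib.h>

#define CACHE_MAX 4

struct alloc_cache_demo {
	void *entries[CACHE_MAX];
	unsigned int nr;
};

static bool cache_put(struct alloc_cache_demo *c, void *obj)
{
	if (c->nr < CACHE_MAX) {
		c->entries[c->nr++] = obj;
		return true;
	}
	return false;
}

/* The consolidated helper: stash the object for reuse, or free it. */
static void cache_free(struct alloc_cache_demo *c, void *obj)
{
	if (!cache_put(c, obj))
		free(obj);
}

int main(void)
{
	struct alloc_cache_demo c = { .nr = 0 };

	for (int i = 0; i < 8; i++)
		cache_free(&c, malloc(64));	/* first 4 cached, rest freed */
	while (c.nr)
		free(c.entries[--c.nr]);
	return 0;
}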
@@ -1582,7 +1584,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
return 0;
}
-void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
+void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw)
{
io_req_complete_defer(req);
}
@@ -1719,15 +1721,13 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
return !!req->file;
}
-static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+static inline int __io_issue_sqe(struct io_kiocb *req,
+ unsigned int issue_flags,
+ const struct io_issue_def *def)
{
- const struct io_issue_def *def = &io_issue_defs[req->opcode];
const struct cred *creds = NULL;
int ret;
- if (unlikely(!io_assign_file(req, def, issue_flags)))
- return -EBADF;
-
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
creds = override_creds(req->creds);
@@ -1742,6 +1742,19 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (creds)
revert_creds(creds);
+ return ret;
+}
+
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ int ret;
+
+ if (unlikely(!io_assign_file(req, def, issue_flags)))
+ return -EBADF;
+
+ ret = __io_issue_sqe(req, issue_flags, def);
+
if (ret == IOU_OK) {
if (issue_flags & IO_URING_F_COMPLETE_DEFER)
io_req_complete_defer(req);
@@ -1762,11 +1775,26 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
-int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
+int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, ts);
- return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
- IO_URING_F_COMPLETE_DEFER);
+ const unsigned int issue_flags = IO_URING_F_NONBLOCK |
+ IO_URING_F_MULTISHOT |
+ IO_URING_F_COMPLETE_DEFER;
+ int ret;
+
+ io_tw_lock(req->ctx, tw);
+
+ WARN_ON_ONCE(!req->file);
+ if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EFAULT;
+
+ ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
+
+ WARN_ON_ONCE(ret == IOU_OK);
+
+ if (ret == IOU_ISSUE_SKIP_COMPLETE)
+ ret = 0;
+ return ret;
}
struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
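The refactor hoists file assignment and completion handling out of a new __io_issue_sqe() core so that io_poll_issue() can call the core directly and interpret the return code itself. A hedged, much-simplified sketch of the shape of that split; every name and return value below is a stand-in:

#include <stdio.h>

enum { RET_OK = 0, RET_SKIP_COMPLETE = 1, RET_ERR = -1 };

/* Core issue step shared by both paths (stand-in for __io_issue_sqe()). */
static int issue_core(int req)
{
	return req ? RET_SKIP_COMPLETE : RET_ERR;
}

/* Normal path: validates the file, then post-processes the result. */
static int issue(int req, int have_file)
{
	if (!have_file)
		return RET_ERR;		/* -EBADF in the real code */
	return issue_core(req);
}

/* Poll path: reuses the core and flattens "skip complete" to success. */
static int poll_issue(int req)
{
	int ret = issue_core(req);

	return ret == RET_SKIP_COMPLETE ? 0 : ret;
}

int main(void)
{
	printf("%d %d\n", issue(1, 1), poll_issue(1));
	return 0;
}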
@@ -1996,9 +2024,8 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
return true;
}
-static void io_init_req_drain(struct io_kiocb *req)
+static void io_init_drain(struct io_ring_ctx *ctx)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *head = ctx->submit_state.link.head;
ctx->drain_active = true;
@@ -2062,7 +2089,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (sqe_flags & IOSQE_IO_DRAIN) {
if (ctx->drain_disabled)
return io_init_fail_req(req, -EOPNOTSUPP);
- io_init_req_drain(req);
+ io_init_drain(ctx);
}
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
@@ -2458,8 +2485,18 @@ static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
}
+struct ext_arg {
+ size_t argsz;
+ struct timespec64 ts;
+ const sigset_t __user *sig;
+ ktime_t min_time;
+ bool ts_set;
+ bool iowait;
+};
+
static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
ktime_t start_time)
{
int ret = 0;
@@ -2469,7 +2506,7 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
* can take into account that the task is waiting for IO - turns out
* to be important for low QD IO.
*/
- if (current_pending_io())
+ if (ext_arg->iowait && current_pending_io())
current->in_iowait = 1;
if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
@@ -2482,6 +2519,7 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
/* If this returns > 0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
ktime_t start_time)
{
if (unlikely(READ_ONCE(ctx->check_cq)))
@@ -2495,17 +2533,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (unlikely(io_should_wake(iowq)))
return 0;
- return __io_cqring_wait_schedule(ctx, iowq, start_time);
+ return __io_cqring_wait_schedule(ctx, iowq, ext_arg, start_time);
}
-struct ext_arg {
- size_t argsz;
- struct timespec64 ts;
- const sigset_t __user *sig;
- ktime_t min_time;
- bool ts_set;
-};
-
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -2583,7 +2613,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
TASK_INTERRUPTIBLE);
}
- ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
+ ret = io_cqring_wait_schedule(ctx, &iowq, ext_arg, start_time);
__set_current_state(TASK_RUNNING);
atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
@@ -2704,12 +2734,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_files_unregister(ctx);
io_cqring_overflow_kill(ctx);
io_eventfd_unregister(ctx);
- io_alloc_cache_free(&ctx->apoll_cache, kfree);
- io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
- io_alloc_cache_free(&ctx->uring_cache, kfree);
- io_alloc_cache_free(&ctx->msg_cache, kfree);
- io_futex_cache_free(ctx);
+ io_free_alloc_caches(ctx);
io_destroy_buffers(ctx);
io_free_region(ctx, &ctx->param_region);
mutex_unlock(&ctx->uring_lock);
@@ -3239,6 +3264,8 @@ static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
const struct io_uring_getevents_arg __user *uarg = argp;
struct io_uring_getevents_arg arg;
+ ext_arg->iowait = !(flags & IORING_ENTER_NO_IOWAIT);
+
/*
* If EXT_ARG isn't set, then we have no timespec and the argp pointer
* is just a pointer to the sigset_t.
@@ -3316,7 +3343,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
IORING_ENTER_REGISTERED_RING |
IORING_ENTER_ABS_TIMER |
- IORING_ENTER_EXT_ARG_REG)))
+ IORING_ENTER_EXT_ARG_REG |
+ IORING_ENTER_NO_IOWAIT)))
return -EINVAL;
/*
@@ -3537,6 +3565,44 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
O_RDWR | O_CLOEXEC, NULL);
}
+static int io_uring_sanitise_params(struct io_uring_params *p)
+{
+ unsigned flags = p->flags;
+
+ /* There is no way to mmap rings without a real fd */
+ if ((flags & IORING_SETUP_REGISTERED_FD_ONLY) &&
+ !(flags & IORING_SETUP_NO_MMAP))
+ return -EINVAL;
+
+ if (flags & IORING_SETUP_SQPOLL) {
+ /* IPI related flags don't make sense with SQPOLL */
+ if (flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_TASKRUN_FLAG |
+ IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ }
+
+ if (flags & IORING_SETUP_TASKRUN_FLAG) {
+ if (!(flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_DEFER_TASKRUN)))
+ return -EINVAL;
+ }
+
+ /* HYBRID_IOPOLL only valid with IOPOLL */
+ if ((flags & IORING_SETUP_HYBRID_IOPOLL) && !(flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ /*
+ * For DEFER_TASKRUN we require the completion task to be the same as
+ * the submission task. This implies that there is only one submitter.
+ */
+ if ((flags & IORING_SETUP_DEFER_TASKRUN) &&
+ !(flags & IORING_SETUP_SINGLE_ISSUER))
+ return -EINVAL;
+
+ return 0;
+}
+
int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
{
if (!entries)
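With the flag validation consolidated into io_uring_sanitise_params(), invalid setup combinations are rejected before any ring state is allocated; the userspace-visible behaviour stays the same. For example, DEFER_TASKRUN without SINGLE_ISSUER still fails with -EINVAL, as in this sketch (assumes liburing):

/* Build with: cc setup_check.c -luring */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	int ret;

	/* DEFER_TASKRUN requires SINGLE_ISSUER; setup is expected to fail. */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_DEFER_TASKRUN);
	if (ret < 0) {
		printf("setup rejected as expected: %d\n", ret);
		return 0;
	}
	io_uring_queue_exit(&ring);
	return 0;
}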
@@ -3547,10 +3613,6 @@ int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
entries = IORING_MAX_ENTRIES;
}
- if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
- && !(p->flags & IORING_SETUP_NO_MMAP))
- return -EINVAL;
-
/*
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
@@ -3612,6 +3674,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
struct file *file;
int ret;
+ ret = io_uring_sanitise_params(p);
+ if (ret)
+ return ret;
+
ret = io_uring_fill_params(entries, p);
if (unlikely(ret))
return ret;
@@ -3659,37 +3725,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
* For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
* COOP_TASKRUN is set, then IPIs are never needed by the app.
*/
- ret = -EINVAL;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- /* IPI related flags don't make sense with SQPOLL */
- if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
- IORING_SETUP_TASKRUN_FLAG |
- IORING_SETUP_DEFER_TASKRUN))
- goto err;
- ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
+ if (ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_COOP_TASKRUN))
ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else {
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
- !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
- goto err;
+ else
ctx->notify_method = TWA_SIGNAL;
- }
-
- /* HYBRID_IOPOLL only valid with IOPOLL */
- if ((ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_HYBRID_IOPOLL)) ==
- IORING_SETUP_HYBRID_IOPOLL)
- goto err;
-
- /*
- * For DEFER_TASKRUN we require the completion task to be the same as the
- * submission task. This implies that there is only one submitter, so enforce
- * that.
- */
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
- !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
- goto err;
- }
/*
* This is just grabbed for accounting purposes. When a process exits,
@@ -3719,7 +3758,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT |
- IORING_FEAT_RW_ATTR;
+ IORING_FEAT_RW_ATTR | IORING_FEAT_NO_IOWAIT;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
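IORING_FEAT_NO_IOWAIT advertises the IORING_ENTER_NO_IOWAIT behaviour added earlier in this patch: waiters can opt out of being accounted as iowait. A hedged userspace sketch of probing the feature and using the enter flag (assumes liburing and uapi headers new enough to define both constants; error handling trimmed):

/* Build with: cc no_iowait.c -luring */
#include <liburing.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	memset(&p, 0, sizeof(p));
	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	if (p.features & IORING_FEAT_NO_IOWAIT) {
		/* Wait for one CQE without being accounted as iowait. */
		syscall(__NR_io_uring_enter, ring.ring_fd, 0, 1,
			IORING_ENTER_GETEVENTS | IORING_ENTER_NO_IOWAIT,
			NULL, 0);
	}
	io_uring_queue_exit(&ring);
	return 0;
}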
@@ -3915,6 +3954,9 @@ static int __init io_uring_init(void)
io_uring_optable_init();
+ /* imu->dir is u8 */
+ BUILD_BUG_ON((IO_IMU_DEST | IO_IMU_SOURCE) > U8_MAX);
+
/*
* Allow user copy in the per-command field, which starts after the
* file in io_kiocb and until the opcode field. The openat2 handling
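The new BUILD_BUG_ON documents that the combined IO_IMU_DEST | IO_IMU_SOURCE value must fit the u8 imu->dir field. The same compile-time bound check in standalone C11, with illustrative values (the real IO_IMU_* constants live in io_uring's rsrc code):

#include <stdint.h>

/* Illustrative direction bits, not the kernel's IO_IMU_* values. */
#define IMU_DEST_DEMO	(1U << 0)
#define IMU_SOURCE_DEMO	(1U << 1)

struct mapped_buf_demo {
	uint8_t dir;	/* must hold every IMU_*_DEMO combination */
};

/* Userspace cousin of BUILD_BUG_ON(): fails the build, not at runtime. */
_Static_assert((IMU_DEST_DEMO | IMU_SOURCE_DEMO) <= UINT8_MAX,
	       "dir is u8, direction bits must fit");

int main(void)
{
	struct mapped_buf_demo b = { .dir = IMU_DEST_DEMO | IMU_SOURCE_DEMO };
	return b.dir ? 0 : 1;
}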
@@ -3925,10 +3967,9 @@ static int __init io_uring_init(void)
req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
- io_buf_cachep = KMEM_CACHE(io_buffer,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
+ BUG_ON(!iou_wq);
#ifdef CONFIG_SYSCTL
register_sysctl_init("kernel", kernel_io_uring_disabled_table);