Diffstat (limited to 'io_uring')
 io_uring/io_uring.c | 2 +-
 io_uring/register.c | 8 +-------
 io_uring/rw.c       | 8 ++++++--
 io_uring/waitid.c   | 3 ++-
 io_uring/zcrx.c     | 5 +++++
 5 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 49ebdeb5b2d9..820ef0527666 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -29,7 +29,7 @@
*
* Also see the examples in the liburing library:
*
- * git://git.kernel.dk/liburing
+ * git://git.kernel.org/pub/scm/linux/kernel/git/axboe/liburing.git
*
* io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
* from data shared between the kernel and application. This is done both
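The comment above describes the READ_ONCE()/WRITE_ONCE() rule for data shared between the kernel and the application. Below is a minimal userspace sketch of that pattern; the volatile-cast macros only mirror the spirit of the kernel's <linux/compiler.h> helpers, and the struct and variable names (sq_ring, local head/tail) are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct sq_ring {
	uint32_t head;	/* written by one side, read by the other */
	uint32_t tail;	/* written by the other side */
};

int main(void)
{
	struct sq_ring ring = { 0, 0 };

	/*
	 * Loads and stores on shared fields go through *_ONCE so the
	 * compiler cannot tear, fuse, or re-read them.
	 */
	uint32_t head = READ_ONCE(ring.head);

	WRITE_ONCE(ring.tail, head + 1);
	printf("head=%u tail=%u\n", head, ring.tail);
	return 0;
}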
diff --git a/io_uring/register.c b/io_uring/register.c
index 43f04c47522c..2e4717f1357c 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -421,13 +421,6 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
if (unlikely(ret))
return ret;
- /* nothing to do, but copy params back */
- if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {
- if (copy_to_user(arg, &p, sizeof(p)))
- return -EFAULT;
- return 0;
- }
-
size = rings_size(p.flags, p.sq_entries, p.cq_entries,
&sq_array_offset);
if (size == SIZE_MAX)
@@ -613,6 +606,7 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
if (ret)
return ret;
if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
+ guard(mutex)(&ctx->mmap_lock);
io_free_region(ctx, &ctx->param_region);
return -EFAULT;
}
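The register.c hunk above adds guard(mutex)(&ctx->mmap_lock), the kernel's scope-based lock guard (see <linux/cleanup.h>), so io_free_region() runs with the mmap lock held and the lock is dropped automatically on return. A rough userspace analogue of that idiom using __attribute__((cleanup)) and pthreads; the helper names (unlock_cleanup, free_region_locked) and the region variable are made up for the sketch.

#include <pthread.h>
#include <stdio.h>

static int region = 42;
static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int free_region_locked(void)
{
	/* "guard": lock now, unlock automatically on every return path */
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &mmap_lock;

	pthread_mutex_lock(guard);
	region = 0;		/* work done under the lock */
	return -1;		/* early return still unlocks */
}

int main(void)
{
	printf("ret=%d region=%d\n", free_region_locked(), region);
	return 0;
}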
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 08882648d569..5b2241a5813c 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -542,7 +542,7 @@ static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
if (res == req->cqe.res)
return;
- if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
} else {
req_set_fail(req);
@@ -655,13 +655,17 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
if (ret >= 0 && req->flags & REQ_F_CUR_POS)
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
+ u32 cflags = 0;
+
__io_complete_rw_common(req, ret);
/*
* Safe to call io_end from here as we're inline
* from the submission path.
*/
io_req_io_end(req);
- io_req_set_res(req, final_ret, io_put_kbuf(req, ret, sel->buf_list));
+ if (sel)
+ cflags = io_put_kbuf(req, ret, sel->buf_list);
+ io_req_set_res(req, final_ret, cflags);
io_req_rw_cleanup(req, issue_flags);
return IOU_COMPLETE;
} else {
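In the rw.c hunk above, the completion flags now default to 0 and io_put_kbuf() only runs when a buffer selection (sel) actually exists. A tiny sketch of that shape with stand-in types; struct buf_sel and put_kbuf_flags() are not the io_uring API, just placeholders.

#include <stdint.h>
#include <stdio.h>

struct buf_sel { uint16_t buf_group; };

static uint32_t put_kbuf_flags(const struct buf_sel *sel)
{
	/* a real implementation would recycle the buffer and encode its id */
	return (uint32_t)sel->buf_group << 16;
}

static uint32_t completion_flags(const struct buf_sel *sel)
{
	uint32_t cflags = 0;

	if (sel)		/* no selection: flags stay 0 */
		cflags = put_kbuf_flags(sel);
	return cflags;
}

int main(void)
{
	struct buf_sel sel = { .buf_group = 3 };

	printf("%u %u\n", completion_flags(NULL), completion_flags(&sel));
	return 0;
}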
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 26c118f3918d..f25110fb1b12 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -230,13 +230,14 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
if (!pid_child_should_wake(wo, p))
return 0;
+ list_del_init(&wait->entry);
+
/* cancel is in progress */
if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
return 1;
req->io_task_work.func = io_waitid_cb;
io_req_task_work_add(req);
- list_del_init(&wait->entry);
return 1;
}
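The waitid.c hunk moves list_del_init() ahead of the cancellation check, so the wait entry comes off the queue before the function can return early. A userspace sketch of that ordering with minimal stand-in list helpers (the kernel's live in <linux/list.h>); wake_fn() and its cancel flag are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static void list_add(struct list_node *head, struct list_node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static int wake_fn(struct list_node *entry, bool cancel_in_progress)
{
	/* detach first: from here on, nobody else can find this entry */
	list_del_init(entry);

	if (cancel_in_progress)		/* cancellation owns completion */
		return 1;

	/* otherwise schedule our own completion work */
	printf("queueing task work\n");
	return 1;
}

int main(void)
{
	struct list_node head, entry;

	list_init(&head);
	list_init(&entry);
	list_add(&head, &entry);

	wake_fn(&entry, true);
	printf("detached: %s\n", entry.next == &entry ? "yes" : "no");
	return 0;
}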
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 723e4266b91f..a816f5902091 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -1079,6 +1079,7 @@ static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
cc->size -= n;
cc->offset += n;
+ src_offset += n;
len -= n;
copied += n;
}
@@ -1236,12 +1237,16 @@ io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
end = start + frag_iter->len;
if (offset < end) {
+ size_t count;
+
copy = end - offset;
if (copy > len)
copy = len;
off = offset - start;
+ count = desc->count;
ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
+ desc->count = count;
if (ret < 0)
goto out;
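The zcrx.c changes advance src_offset as io_copy_page() walks the source in chunks, and save/restore desc->count around the recursive io_zcrx_recv_skb() call for frag_iter. A small userspace sketch of the first fix: a chunked copy has to move the source offset forward together with the rest of the bookkeeping, or every chunk re-reads the start of the source. copy_chunked() and its buffers are invented for the example.

#include <stdio.h>
#include <string.h>

static size_t copy_chunked(char *dst, size_t dst_size,
			   const char *src, size_t src_offset, size_t len,
			   size_t chunk)
{
	size_t copied = 0;

	while (len && dst_size) {
		size_t n = len < chunk ? len : chunk;

		if (n > dst_size)
			n = dst_size;
		memcpy(dst + copied, src + src_offset, n);

		dst_size   -= n;
		src_offset += n;	/* the fix: advance in the source too */
		len        -= n;
		copied     += n;
	}
	return copied;
}

int main(void)
{
	char dst[16] = { 0 };
	size_t n = copy_chunked(dst, sizeof(dst) - 1,
				"hello, io_uring", 0, 15, 4);

	printf("copied %zu bytes: %s\n", n, dst);
	return 0;
}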