Diffstat (limited to 'tools/testing/selftests/ublk')
22 files changed, 921 insertions, 221 deletions
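Two recurring changes account for most of the churn in the diff below: target SQEs are now allocated from a per-thread io_uring (ublk_io_alloc_sqes()) instead of a per-queue ring, and build_user_data() gains a q_id argument so the completion handler can recover the owning queue from the CQE alone. The following standalone sketch mirrors the encoding introduced in the kublk.h hunk; it is illustrative only, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the user_data layout used by the patched kublk.h:
 *   bits  0..15  tag
 *   bits 16..23  op
 *   bits 24..39  tgt_data
 *   bits 56..62  q_id   (7 bits, hence UBLK_MAX_QUEUES_SHIFT <= 7)
 *   bit  63      target-I/O marker
 */
static inline uint64_t build_user_data(unsigned tag, unsigned op,
				       unsigned tgt_data, unsigned q_id,
				       unsigned is_target_io)
{
	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7));
	return tag | (op << 16) | ((uint64_t)tgt_data << 24) |
		(uint64_t)q_id << 56 | (uint64_t)is_target_io << 63;
}

static inline unsigned user_data_to_tag(uint64_t d)  { return d & 0xffff; }
static inline unsigned user_data_to_q_id(uint64_t d) { return (d >> 56) & 0x7f; }

int main(void)
{
	/* e.g. tag 7 on queue 3, marked as target I/O */
	uint64_t d = build_user_data(7, 0, 0, 3, 1);

	printf("tag=%u q_id=%u\n", user_data_to_tag(d), user_data_to_q_id(d));
	return 0;
}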
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index ec4624a283bc..5d7f4ecfb816 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -1,6 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 
-CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)
+CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)/usr/include
+ifneq ($(WERROR),0)
+	CFLAGS += -Werror
+endif
+
 LDLIBS += -lpthread -lm -luring
 
 TEST_PROGS := test_generic_01.sh
@@ -9,6 +13,13 @@ TEST_PROGS += test_generic_03.sh
 TEST_PROGS += test_generic_04.sh
 TEST_PROGS += test_generic_05.sh
 TEST_PROGS += test_generic_06.sh
+TEST_PROGS += test_generic_07.sh
+
+TEST_PROGS += test_generic_08.sh
+TEST_PROGS += test_generic_09.sh
+TEST_PROGS += test_generic_10.sh
+TEST_PROGS += test_generic_11.sh
+TEST_PROGS += test_generic_12.sh
 
 TEST_PROGS += test_null_01.sh
 TEST_PROGS += test_null_02.sh
diff --git a/tools/testing/selftests/ublk/fault_inject.c b/tools/testing/selftests/ublk/fault_inject.c
index 94a8e729ba4c..6e60f7d97125 100644
--- a/tools/testing/selftests/ublk/fault_inject.c
+++ b/tools/testing/selftests/ublk/fault_inject.c
@@ -16,6 +16,11 @@ static int ublk_fault_inject_tgt_init(const struct dev_ctx *ctx,
 	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
 	unsigned long dev_size = 250UL << 30;
 
+	if (ctx->auto_zc_fallback) {
+		ublk_err("%s: not support auto_zc_fallback\n", __func__);
+		return -EINVAL;
+	}
+
 	dev->tgt.dev_size = dev_size;
 	dev->tgt.params = (struct ublk_params) {
 		.types = UBLK_PARAM_TYPE_BASIC,
@@ -41,9 +46,9 @@ static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
 		.tv_nsec = (long long)q->dev->private_data,
 	};
 
-	ublk_queue_alloc_sqes(q, &sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), &sqe, 1);
 	io_uring_prep_timeout(sqe, &ts, 1, 0);
-	sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, 1);
+	sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
 
 	ublk_queued_tgt_io(q, tag, 1);
 
diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c
index 6f34eabfae97..cfa59b631693 100644
--- a/tools/testing/selftests/ublk/file_backed.c
+++ b/tools/testing/selftests/ublk/file_backed.c
@@ -18,52 +18,56 @@ static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_des
 	unsigned ublk_op = ublksrv_get_op(iod);
 	struct io_uring_sqe *sqe[1];
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
 	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
 	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 	/* bit63 marks us as tgt io */
-	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
+	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
 	return 1;
 }
 
 static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
 {
 	unsigned ublk_op = ublksrv_get_op(iod);
-	int zc = ublk_queue_use_zc(q);
-	enum io_uring_op op = ublk_to_uring_op(iod, zc);
+	unsigned zc = ublk_queue_use_zc(q);
+	unsigned auto_zc = ublk_queue_use_auto_zc(q);
+	enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc);
 	struct io_uring_sqe *sqe[3];
+	void *addr = (zc | auto_zc) ?
NULL : (void *)iod->addr; - if (!zc) { - ublk_queue_alloc_sqes(q, sqe, 1); + if (!zc || auto_zc) { + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1); if (!sqe[0]) return -ENOMEM; io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/, - (void *)iod->addr, + addr, iod->nr_sectors << 9, iod->start_sector << 9); + if (auto_zc) + sqe[0]->buf_index = tag; io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE); /* bit63 marks us as tgt io */ - sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1); + sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); return 1; } - ublk_queue_alloc_sqes(q, sqe, 3); + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3); - io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag); + io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; sqe[0]->user_data = build_user_data(tag, - ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1); + ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0, iod->nr_sectors << 9, iod->start_sector << 9); sqe[1]->buf_index = tag; sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK; - sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1); + sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); - io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag); - sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1); + io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); + sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); return 2; } @@ -145,6 +149,11 @@ static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) }, }; + if (ctx->auto_zc_fallback) { + ublk_err("%s: not support auto_zc_fallback\n", __func__); + return -EINVAL; + } + ret = backing_file_tgt_init(dev); if (ret) return ret; diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c index 759f06637146..e2d2042810d4 100644 --- a/tools/testing/selftests/ublk/kublk.c +++ b/tools/testing/selftests/ublk/kublk.c @@ -216,6 +216,30 @@ static int ublk_ctrl_get_features(struct ublk_dev *dev, return __ublk_ctrl_cmd(dev, &data); } +static int ublk_ctrl_update_size(struct ublk_dev *dev, + __u64 nr_sects) +{ + struct ublk_ctrl_cmd_data data = { + .cmd_op = UBLK_U_CMD_UPDATE_SIZE, + .flags = CTRL_CMD_HAS_DATA, + }; + + data.data[0] = nr_sects; + return __ublk_ctrl_cmd(dev, &data); +} + +static int ublk_ctrl_quiesce_dev(struct ublk_dev *dev, + unsigned int timeout_ms) +{ + struct ublk_ctrl_cmd_data data = { + .cmd_op = UBLK_U_CMD_QUIESCE_DEV, + .flags = CTRL_CMD_HAS_DATA, + }; + + data.data[0] = timeout_ms; + return __ublk_ctrl_cmd(dev, &data); +} + static const char *ublk_dev_state_desc(struct ublk_dev *dev) { switch (dev->dev_info.state) { @@ -324,8 +348,8 @@ static void ublk_ctrl_dump(struct ublk_dev *dev) for (i = 0; i < info->nr_hw_queues; i++) { ublk_print_cpu_set(&affinity[i], buf, sizeof(buf)); - printf("\tqueue %u: tid %d affinity(%s)\n", - i, dev->q[i].tid, buf); + printf("\tqueue %u: affinity(%s)\n", + i, buf); } free(affinity); } @@ -388,16 +412,6 @@ static void ublk_queue_deinit(struct ublk_queue *q) int i; int nr_ios = q->q_depth; - io_uring_unregister_buffers(&q->ring); - - io_uring_unregister_ring_fd(&q->ring); - - if (q->ring.ring_fd > 0) { - io_uring_unregister_files(&q->ring); - close(q->ring.ring_fd); - q->ring.ring_fd = -1; - } - if (q->io_cmd_buf) munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q)); @@ -405,25 
+419,39 @@ static void ublk_queue_deinit(struct ublk_queue *q) free(q->ios[i].buf_addr); } -static int ublk_queue_init(struct ublk_queue *q) +static void ublk_thread_deinit(struct ublk_thread *t) +{ + io_uring_unregister_buffers(&t->ring); + + io_uring_unregister_ring_fd(&t->ring); + + if (t->ring.ring_fd > 0) { + io_uring_unregister_files(&t->ring); + close(t->ring.ring_fd); + t->ring.ring_fd = -1; + } +} + +static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags) { struct ublk_dev *dev = q->dev; int depth = dev->dev_info.queue_depth; - int i, ret = -1; + int i; int cmd_buf_size, io_buf_size; unsigned long off; - int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth; q->tgt_ops = dev->tgt.ops; q->state = 0; q->q_depth = depth; - q->cmd_inflight = 0; - q->tid = gettid(); - if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) { + if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) { q->state |= UBLKSRV_NO_BUF; - q->state |= UBLKSRV_ZC; + if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) + q->state |= UBLKSRV_ZC; + if (dev->dev_info.flags & UBLK_F_AUTO_BUF_REG) + q->state |= UBLKSRV_AUTO_BUF_REG; } + q->state |= extra_flags; cmd_buf_size = ublk_queue_cmd_buf_sz(q); off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz(); @@ -439,6 +467,7 @@ static int ublk_queue_init(struct ublk_queue *q) for (i = 0; i < q->q_depth; i++) { q->ios[i].buf_addr = NULL; q->ios[i].flags = UBLKSRV_NEED_FETCH_RQ | UBLKSRV_IO_FREE; + q->ios[i].tag = i; if (q->state & UBLKSRV_NO_BUF) continue; @@ -451,39 +480,57 @@ static int ublk_queue_init(struct ublk_queue *q) } } - ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth, + return 0; + fail: + ublk_queue_deinit(q); + ublk_err("ublk dev %d queue %d failed\n", + dev->dev_info.dev_id, q->q_id); + return -ENOMEM; +} + +static int ublk_thread_init(struct ublk_thread *t) +{ + struct ublk_dev *dev = t->dev; + int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth; + int ret; + + ret = ublk_setup_ring(&t->ring, ring_depth, cq_depth, IORING_SETUP_COOP_TASKRUN | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN); if (ret < 0) { - ublk_err("ublk dev %d queue %d setup io_uring failed %d\n", - q->dev->dev_info.dev_id, q->q_id, ret); + ublk_err("ublk dev %d thread %d setup io_uring failed %d\n", + dev->dev_info.dev_id, t->idx, ret); goto fail; } - if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) { - ret = io_uring_register_buffers_sparse(&q->ring, q->q_depth); + if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) { + unsigned nr_ios = dev->dev_info.queue_depth * dev->dev_info.nr_hw_queues; + unsigned max_nr_ios_per_thread = nr_ios / dev->nthreads; + max_nr_ios_per_thread += !!(nr_ios % dev->nthreads); + ret = io_uring_register_buffers_sparse( + &t->ring, max_nr_ios_per_thread); if (ret) { - ublk_err("ublk dev %d queue %d register spare buffers failed %d", - dev->dev_info.dev_id, q->q_id, ret); + ublk_err("ublk dev %d thread %d register spare buffers failed %d", + dev->dev_info.dev_id, t->idx, ret); goto fail; } } - io_uring_register_ring_fd(&q->ring); + io_uring_register_ring_fd(&t->ring); - ret = io_uring_register_files(&q->ring, dev->fds, dev->nr_fds); + ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds); if (ret) { - ublk_err("ublk dev %d queue %d register files failed %d\n", - q->dev->dev_info.dev_id, q->q_id, ret); + ublk_err("ublk dev %d thread %d register files failed %d\n", + t->dev->dev_info.dev_id, t->idx, ret); goto fail; } return 0; - fail: - 
ublk_queue_deinit(q); - ublk_err("ublk dev %d queue %d failed\n", - dev->dev_info.dev_id, q->q_id); +fail: + ublk_thread_deinit(t); + ublk_err("ublk dev %d thread %d init failed\n", + dev->dev_info.dev_id, t->idx); return -ENOMEM; } @@ -525,8 +572,27 @@ static void ublk_dev_unprep(struct ublk_dev *dev) close(dev->fds[0]); } -int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) +static void ublk_set_auto_buf_reg(const struct ublk_queue *q, + struct io_uring_sqe *sqe, + unsigned short tag) { + struct ublk_auto_buf_reg buf = {}; + + if (q->tgt_ops->buf_index) + buf.index = q->tgt_ops->buf_index(q, tag); + else + buf.index = q->ios[tag].buf_index; + + if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK) + buf.flags = UBLK_AUTO_BUF_REG_FALLBACK; + + sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf); +} + +int ublk_queue_io_cmd(struct ublk_io *io) +{ + struct ublk_thread *t = io->t; + struct ublk_queue *q = ublk_io_to_queue(io); struct ublksrv_io_cmd *cmd; struct io_uring_sqe *sqe[1]; unsigned int cmd_op = 0; @@ -536,23 +602,28 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) if (!(io->flags & UBLKSRV_IO_FREE)) return 0; - /* we issue because we need either fetching or committing */ + /* + * we issue because we need either fetching or committing or + * getting data + */ if (!(io->flags & - (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP))) + (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA))) return 0; - if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) + if (io->flags & UBLKSRV_NEED_GET_DATA) + cmd_op = UBLK_U_IO_NEED_GET_DATA; + else if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; else if (io->flags & UBLKSRV_NEED_FETCH_RQ) cmd_op = UBLK_U_IO_FETCH_REQ; - if (io_uring_sq_space_left(&q->ring) < 1) - io_uring_submit(&q->ring); + if (io_uring_sq_space_left(&t->ring) < 1) + io_uring_submit(&t->ring); - ublk_queue_alloc_sqes(q, sqe, 1); + ublk_io_alloc_sqes(io, sqe, 1); if (!sqe[0]) { - ublk_err("%s: run out of sqe %d, tag %d\n", - __func__, q->q_id, tag); + ublk_err("%s: run out of sqe. 
thread %u, tag %d\n", + __func__, t->idx, io->tag); return -1; } @@ -567,42 +638,80 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) sqe[0]->opcode = IORING_OP_URING_CMD; sqe[0]->flags = IOSQE_FIXED_FILE; sqe[0]->rw_flags = 0; - cmd->tag = tag; + cmd->tag = io->tag; cmd->q_id = q->q_id; if (!(q->state & UBLKSRV_NO_BUF)) cmd->addr = (__u64) (uintptr_t) io->buf_addr; else cmd->addr = 0; - user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0); + if (q->state & UBLKSRV_AUTO_BUF_REG) + ublk_set_auto_buf_reg(q, sqe[0], io->tag); + + user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0); io_uring_sqe_set_data64(sqe[0], user_data); io->flags = 0; - q->cmd_inflight += 1; + t->cmd_inflight += 1; - ublk_dbg(UBLK_DBG_IO_CMD, "%s: (qid %d tag %u cmd_op %u) iof %x stopping %d\n", - __func__, q->q_id, tag, cmd_op, - io->flags, !!(q->state & UBLKSRV_QUEUE_STOPPING)); + ublk_dbg(UBLK_DBG_IO_CMD, "%s: (thread %u qid %d tag %u cmd_op %u) iof %x stopping %d\n", + __func__, t->idx, q->q_id, io->tag, cmd_op, + io->flags, !!(t->state & UBLKSRV_THREAD_STOPPING)); return 1; } -static void ublk_submit_fetch_commands(struct ublk_queue *q) +static void ublk_submit_fetch_commands(struct ublk_thread *t) { - int i = 0; + struct ublk_queue *q; + struct ublk_io *io; + int i = 0, j = 0; - for (i = 0; i < q->q_depth; i++) - ublk_queue_io_cmd(q, &q->ios[i], i); + if (t->dev->per_io_tasks) { + /* + * Lexicographically order all the (qid,tag) pairs, with + * qid taking priority (so (1,0) > (0,1)). Then make + * this thread the daemon for every Nth entry in this + * list (N is the number of threads), starting at this + * thread's index. This ensures that each queue is + * handled by as many ublk server threads as possible, + * so that load that is concentrated on one or a few + * queues can make use of all ublk server threads. + */ + const struct ublksrv_ctrl_dev_info *dinfo = &t->dev->dev_info; + int nr_ios = dinfo->nr_hw_queues * dinfo->queue_depth; + for (i = t->idx; i < nr_ios; i += t->dev->nthreads) { + int q_id = i / dinfo->queue_depth; + int tag = i % dinfo->queue_depth; + q = &t->dev->q[q_id]; + io = &q->ios[tag]; + io->t = t; + io->buf_index = j++; + ublk_queue_io_cmd(io); + } + } else { + /* + * Service exclusively the queue whose q_id matches our + * thread index. 
+ */ + struct ublk_queue *q = &t->dev->q[t->idx]; + for (i = 0; i < q->q_depth; i++) { + io = &q->ios[i]; + io->t = t; + io->buf_index = i; + ublk_queue_io_cmd(io); + } + } } -static int ublk_queue_is_idle(struct ublk_queue *q) +static int ublk_thread_is_idle(struct ublk_thread *t) { - return !io_uring_sq_ready(&q->ring) && !q->io_inflight; + return !io_uring_sq_ready(&t->ring) && !t->io_inflight; } -static int ublk_queue_is_done(struct ublk_queue *q) +static int ublk_thread_is_done(struct ublk_thread *t) { - return (q->state & UBLKSRV_QUEUE_STOPPING) && ublk_queue_is_idle(q); + return (t->state & UBLKSRV_THREAD_STOPPING) && ublk_thread_is_idle(t); } static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q, @@ -620,14 +729,16 @@ static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q, q->tgt_ops->tgt_io_done(q, tag, cqe); } -static void ublk_handle_cqe(struct io_uring *r, +static void ublk_handle_cqe(struct ublk_thread *t, struct io_uring_cqe *cqe, void *data) { - struct ublk_queue *q = container_of(r, struct ublk_queue, ring); + struct ublk_dev *dev = t->dev; + unsigned q_id = user_data_to_q_id(cqe->user_data); + struct ublk_queue *q = &dev->q[q_id]; unsigned tag = user_data_to_tag(cqe->user_data); unsigned cmd_op = user_data_to_op(cqe->user_data); int fetch = (cqe->res != UBLK_IO_RES_ABORT) && - !(q->state & UBLKSRV_QUEUE_STOPPING); + !(t->state & UBLKSRV_THREAD_STOPPING); struct ublk_io *io; if (cqe->res < 0 && cqe->res != -ENODEV) @@ -638,7 +749,7 @@ static void ublk_handle_cqe(struct io_uring *r, __func__, cqe->res, q->q_id, tag, cmd_op, is_target_io(cqe->user_data), user_data_to_tgt_data(cqe->user_data), - (q->state & UBLKSRV_QUEUE_STOPPING)); + (t->state & UBLKSRV_THREAD_STOPPING)); /* Don't retrieve io in case of target io */ if (is_target_io(cqe->user_data)) { @@ -647,10 +758,10 @@ static void ublk_handle_cqe(struct io_uring *r, } io = &q->ios[tag]; - q->cmd_inflight--; + t->cmd_inflight--; if (!fetch) { - q->state |= UBLKSRV_QUEUE_STOPPING; + t->state |= UBLKSRV_THREAD_STOPPING; io->flags &= ~UBLKSRV_NEED_FETCH_RQ; } @@ -658,6 +769,9 @@ static void ublk_handle_cqe(struct io_uring *r, assert(tag < q->q_depth); if (q->tgt_ops->queue_io) q->tgt_ops->queue_io(q, tag); + } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { + io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE; + ublk_queue_io_cmd(io); } else { /* * COMMIT_REQ will be completed immediately since no fetching @@ -671,87 +785,93 @@ static void ublk_handle_cqe(struct io_uring *r, } } -static int ublk_reap_events_uring(struct io_uring *r) +static int ublk_reap_events_uring(struct ublk_thread *t) { struct io_uring_cqe *cqe; unsigned head; int count = 0; - io_uring_for_each_cqe(r, head, cqe) { - ublk_handle_cqe(r, cqe, NULL); + io_uring_for_each_cqe(&t->ring, head, cqe) { + ublk_handle_cqe(t, cqe, NULL); count += 1; } - io_uring_cq_advance(r, count); + io_uring_cq_advance(&t->ring, count); return count; } -static int ublk_process_io(struct ublk_queue *q) +static int ublk_process_io(struct ublk_thread *t) { int ret, reapped; - ublk_dbg(UBLK_DBG_QUEUE, "dev%d-q%d: to_submit %d inflight cmd %u stopping %d\n", - q->dev->dev_info.dev_id, - q->q_id, io_uring_sq_ready(&q->ring), - q->cmd_inflight, - (q->state & UBLKSRV_QUEUE_STOPPING)); + ublk_dbg(UBLK_DBG_THREAD, "dev%d-t%u: to_submit %d inflight cmd %u stopping %d\n", + t->dev->dev_info.dev_id, + t->idx, io_uring_sq_ready(&t->ring), + t->cmd_inflight, + (t->state & UBLKSRV_THREAD_STOPPING)); - if (ublk_queue_is_done(q)) + if (ublk_thread_is_done(t)) return -ENODEV; - 
ret = io_uring_submit_and_wait(&q->ring, 1); - reapped = ublk_reap_events_uring(&q->ring); + ret = io_uring_submit_and_wait(&t->ring, 1); + reapped = ublk_reap_events_uring(t); - ublk_dbg(UBLK_DBG_QUEUE, "submit result %d, reapped %d stop %d idle %d\n", - ret, reapped, (q->state & UBLKSRV_QUEUE_STOPPING), - (q->state & UBLKSRV_QUEUE_IDLE)); + ublk_dbg(UBLK_DBG_THREAD, "submit result %d, reapped %d stop %d idle %d\n", + ret, reapped, (t->state & UBLKSRV_THREAD_STOPPING), + (t->state & UBLKSRV_THREAD_IDLE)); return reapped; } -static void ublk_queue_set_sched_affinity(const struct ublk_queue *q, +static void ublk_thread_set_sched_affinity(const struct ublk_thread *t, cpu_set_t *cpuset) { if (sched_setaffinity(0, sizeof(*cpuset), cpuset) < 0) - ublk_err("ublk dev %u queue %u set affinity failed", - q->dev->dev_info.dev_id, q->q_id); + ublk_err("ublk dev %u thread %u set affinity failed", + t->dev->dev_info.dev_id, t->idx); } -struct ublk_queue_info { - struct ublk_queue *q; - sem_t *queue_sem; +struct ublk_thread_info { + struct ublk_dev *dev; + unsigned idx; + sem_t *ready; cpu_set_t *affinity; }; static void *ublk_io_handler_fn(void *data) { - struct ublk_queue_info *info = data; - struct ublk_queue *q = info->q; - int dev_id = q->dev->dev_info.dev_id; + struct ublk_thread_info *info = data; + struct ublk_thread *t = &info->dev->threads[info->idx]; + int dev_id = info->dev->dev_info.dev_id; int ret; - ret = ublk_queue_init(q); + t->dev = info->dev; + t->idx = info->idx; + + ret = ublk_thread_init(t); if (ret) { - ublk_err("ublk dev %d queue %d init queue failed\n", - dev_id, q->q_id); + ublk_err("ublk dev %d thread %u init failed\n", + dev_id, t->idx); return NULL; } /* IO perf is sensitive with queue pthread affinity on NUMA machine*/ - ublk_queue_set_sched_affinity(q, info->affinity); - sem_post(info->queue_sem); + if (info->affinity) + ublk_thread_set_sched_affinity(t, info->affinity); + sem_post(info->ready); - ublk_dbg(UBLK_DBG_QUEUE, "tid %d: ublk dev %d queue %d started\n", - q->tid, dev_id, q->q_id); + ublk_dbg(UBLK_DBG_THREAD, "tid %d: ublk dev %d thread %u started\n", + gettid(), dev_id, t->idx); /* submit all io commands to ublk driver */ - ublk_submit_fetch_commands(q); + ublk_submit_fetch_commands(t); do { - if (ublk_process_io(q) < 0) + if (ublk_process_io(t) < 0) break; } while (1); - ublk_dbg(UBLK_DBG_QUEUE, "ublk dev %d queue %d exited\n", dev_id, q->q_id); - ublk_queue_deinit(q); + ublk_dbg(UBLK_DBG_THREAD, "tid %d: ublk dev %d thread %d exiting\n", + gettid(), dev_id, t->idx); + ublk_thread_deinit(t); return NULL; } @@ -794,20 +914,20 @@ static int ublk_send_dev_event(const struct dev_ctx *ctx, struct ublk_dev *dev, static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev) { const struct ublksrv_ctrl_dev_info *dinfo = &dev->dev_info; - struct ublk_queue_info *qinfo; + struct ublk_thread_info *tinfo; + unsigned extra_flags = 0; cpu_set_t *affinity_buf; void *thread_ret; - sem_t queue_sem; + sem_t ready; int ret, i; ublk_dbg(UBLK_DBG_DEV, "%s enter\n", __func__); - qinfo = (struct ublk_queue_info *)calloc(sizeof(struct ublk_queue_info), - dinfo->nr_hw_queues); - if (!qinfo) + tinfo = calloc(sizeof(struct ublk_thread_info), dev->nthreads); + if (!tinfo) return -ENOMEM; - sem_init(&queue_sem, 0, 0); + sem_init(&ready, 0, 0); ret = ublk_dev_prep(ctx, dev); if (ret) return ret; @@ -816,21 +936,44 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev) if (ret) return ret; + if (ctx->auto_zc_fallback) + extra_flags = 
UBLKSRV_AUTO_BUF_REG_FALLBACK; + for (i = 0; i < dinfo->nr_hw_queues; i++) { dev->q[i].dev = dev; dev->q[i].q_id = i; - qinfo[i].q = &dev->q[i]; - qinfo[i].queue_sem = &queue_sem; - qinfo[i].affinity = &affinity_buf[i]; - pthread_create(&dev->q[i].thread, NULL, + ret = ublk_queue_init(&dev->q[i], extra_flags); + if (ret) { + ublk_err("ublk dev %d queue %d init queue failed\n", + dinfo->dev_id, i); + goto fail; + } + } + + for (i = 0; i < dev->nthreads; i++) { + tinfo[i].dev = dev; + tinfo[i].idx = i; + tinfo[i].ready = &ready; + + /* + * If threads are not tied 1:1 to queues, setting thread + * affinity based on queue affinity makes little sense. + * However, thread CPU affinity has significant impact + * on performance, so to compare fairly, we'll still set + * thread CPU affinity based on queue affinity where + * possible. + */ + if (dev->nthreads == dinfo->nr_hw_queues) + tinfo[i].affinity = &affinity_buf[i]; + pthread_create(&dev->threads[i].thread, NULL, ublk_io_handler_fn, - &qinfo[i]); + &tinfo[i]); } - for (i = 0; i < dinfo->nr_hw_queues; i++) - sem_wait(&queue_sem); - free(qinfo); + for (i = 0; i < dev->nthreads; i++) + sem_wait(&ready); + free(tinfo); free(affinity_buf); /* everything is fine now, start us */ @@ -852,9 +995,11 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev) ublk_send_dev_event(ctx, dev, dev->dev_info.dev_id); /* wait until we are terminated */ - for (i = 0; i < dinfo->nr_hw_queues; i++) - pthread_join(dev->q[i].thread, &thread_ret); + for (i = 0; i < dev->nthreads; i++) + pthread_join(dev->threads[i].thread, &thread_ret); fail: + for (i = 0; i < dinfo->nr_hw_queues; i++) + ublk_queue_deinit(&dev->q[i]); ublk_dev_unprep(dev); ublk_dbg(UBLK_DBG_DEV, "%s exit\n", __func__); @@ -960,13 +1105,14 @@ wait: static int __cmd_dev_add(const struct dev_ctx *ctx) { + unsigned nthreads = ctx->nthreads; unsigned nr_queues = ctx->nr_hw_queues; const char *tgt_type = ctx->tgt_type; unsigned depth = ctx->queue_depth; __u64 features; const struct ublk_tgt_ops *ops; struct ublksrv_ctrl_dev_info *info; - struct ublk_dev *dev; + struct ublk_dev *dev = NULL; int dev_id = ctx->dev_id; int ret, i; @@ -974,35 +1120,66 @@ static int __cmd_dev_add(const struct dev_ctx *ctx) if (!ops) { ublk_err("%s: no such tgt type, type %s\n", __func__, tgt_type); - return -ENODEV; + ret = -ENODEV; + goto fail; } if (nr_queues > UBLK_MAX_QUEUES || depth > UBLK_QUEUE_DEPTH) { ublk_err("%s: invalid nr_queues or depth queues %u depth %u\n", __func__, nr_queues, depth); - return -EINVAL; + ret = -EINVAL; + goto fail; + } + + /* default to 1:1 threads:queues if nthreads is unspecified */ + if (!nthreads) + nthreads = nr_queues; + + if (nthreads > UBLK_MAX_THREADS) { + ublk_err("%s: %u is too many threads (max %u)\n", + __func__, nthreads, UBLK_MAX_THREADS); + ret = -EINVAL; + goto fail; + } + + if (nthreads != nr_queues && !ctx->per_io_tasks) { + ublk_err("%s: threads %u must be same as queues %u if " + "not using per_io_tasks\n", + __func__, nthreads, nr_queues); + ret = -EINVAL; + goto fail; } dev = ublk_ctrl_init(); if (!dev) { ublk_err("%s: can't alloc dev id %d, type %s\n", __func__, dev_id, tgt_type); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } /* kernel doesn't support get_features */ ret = ublk_ctrl_get_features(dev, &features); - if (ret < 0) - return -EINVAL; + if (ret < 0) { + ret = -EINVAL; + goto fail; + } - if (!(features & UBLK_F_CMD_IOCTL_ENCODE)) - return -ENOTSUP; + if (!(features & UBLK_F_CMD_IOCTL_ENCODE)) { + ret = -ENOTSUP; + goto fail; + } info = 
&dev->dev_info; info->dev_id = ctx->dev_id; info->nr_hw_queues = nr_queues; info->queue_depth = depth; info->flags = ctx->flags; + if ((features & UBLK_F_QUIESCE) && + (info->flags & UBLK_F_USER_RECOVERY)) + info->flags |= UBLK_F_QUIESCE; + dev->nthreads = nthreads; + dev->per_io_tasks = ctx->per_io_tasks; dev->tgt.ops = ops; dev->tgt.sq_depth = depth; dev->tgt.cq_depth = depth; @@ -1032,7 +1209,8 @@ static int __cmd_dev_add(const struct dev_ctx *ctx) fail: if (ret < 0) ublk_send_dev_event(ctx, dev, -1); - ublk_ctrl_deinit(dev); + if (dev) + ublk_ctrl_deinit(dev); return ret; } @@ -1094,6 +1272,8 @@ run: shmctl(ctx->_shmid, IPC_RMID, NULL); /* wait for child and detach from it */ wait(NULL); + if (exit_code == EXIT_FAILURE) + ublk_err("%s: command failed\n", __func__); exit(exit_code); } else { exit(EXIT_FAILURE); @@ -1198,6 +1378,10 @@ static int cmd_dev_get_features(void) [const_ilog2(UBLK_F_USER_COPY)] = "USER_COPY", [const_ilog2(UBLK_F_ZONED)] = "ZONED", [const_ilog2(UBLK_F_USER_RECOVERY_FAIL_IO)] = "RECOVERY_FAIL_IO", + [const_ilog2(UBLK_F_UPDATE_SIZE)] = "UPDATE_SIZE", + [const_ilog2(UBLK_F_AUTO_BUF_REG)] = "AUTO_BUF_REG", + [const_ilog2(UBLK_F_QUIESCE)] = "QUIESCE", + [const_ilog2(UBLK_F_PER_IO_DAEMON)] = "PER_IO_DAEMON", }; struct ublk_dev *dev; __u64 features = 0; @@ -1231,16 +1415,71 @@ static int cmd_dev_get_features(void) return ret; } +static int cmd_dev_update_size(struct dev_ctx *ctx) +{ + struct ublk_dev *dev = ublk_ctrl_init(); + struct ublk_params p; + int ret = -EINVAL; + + if (!dev) + return -ENODEV; + + if (ctx->dev_id < 0) { + fprintf(stderr, "device id isn't provided\n"); + goto out; + } + + dev->dev_info.dev_id = ctx->dev_id; + ret = ublk_ctrl_get_params(dev, &p); + if (ret < 0) { + ublk_err("failed to get params %d %s\n", ret, strerror(-ret)); + goto out; + } + + if (ctx->size & ((1 << p.basic.logical_bs_shift) - 1)) { + ublk_err("size isn't aligned with logical block size\n"); + ret = -EINVAL; + goto out; + } + + ret = ublk_ctrl_update_size(dev, ctx->size >> 9); +out: + ublk_ctrl_deinit(dev); + return ret; +} + +static int cmd_dev_quiesce(struct dev_ctx *ctx) +{ + struct ublk_dev *dev = ublk_ctrl_init(); + int ret = -EINVAL; + + if (!dev) + return -ENODEV; + + if (ctx->dev_id < 0) { + fprintf(stderr, "device id isn't provided for quiesce\n"); + goto out; + } + dev->dev_info.dev_id = ctx->dev_id; + ret = ublk_ctrl_quiesce_dev(dev, 10000); + +out: + ublk_ctrl_deinit(dev); + return ret; +} + static void __cmd_create_help(char *exe, bool recovery) { int i; printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", exe, recovery ? 
"recover" : "add"); - printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g 0|1]\n"); + printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n"); printf("\t[-e 0|1 ] [-i 0|1]\n"); + printf("\t[--nthreads threads] [--per_io_tasks]\n"); printf("\t[target options] [backfile1] [backfile2] ...\n"); printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n"); + printf("\tdefault: nthreads=nr_queues"); for (i = 0; i < sizeof(tgt_ops_list) / sizeof(tgt_ops_list[0]); i++) { const struct ublk_tgt_ops *ops = tgt_ops_list[i]; @@ -1273,6 +1512,8 @@ static int cmd_dev_help(char *exe) printf("%s list [-n dev_id] -a \n", exe); printf("\t -a list all devices, -n list specified device, default -a \n\n"); printf("%s features\n", exe); + printf("%s update_size -n dev_id -s|--size size_in_bytes \n", exe); + printf("%s quiesce -n dev_id\n", exe); return 0; } @@ -1292,6 +1533,11 @@ int main(int argc, char *argv[]) { "recovery_fail_io", 1, NULL, 'e'}, { "recovery_reissue", 1, NULL, 'i'}, { "get_data", 1, NULL, 'g'}, + { "auto_zc", 0, NULL, 0 }, + { "auto_zc_fallback", 0, NULL, 0 }, + { "size", 1, NULL, 's'}, + { "nthreads", 1, NULL, 0 }, + { "per_io_tasks", 0, NULL, 0 }, { 0, 0, 0, 0 } }; const struct ublk_tgt_ops *ops = NULL; @@ -1313,7 +1559,7 @@ int main(int argc, char *argv[]) opterr = 0; optind = 2; - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:az", + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gaz", longopts, &option_idx)) != -1) { switch (opt) { case 'a': @@ -1351,9 +1597,11 @@ int main(int argc, char *argv[]) ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE; break; case 'g': - value = strtol(optarg, NULL, 10); - if (value) - ctx.flags |= UBLK_F_NEED_GET_DATA; + ctx.flags |= UBLK_F_NEED_GET_DATA; + break; + case 's': + ctx.size = strtoull(optarg, NULL, 10); + break; case 0: if (!strcmp(longopts[option_idx].name, "debug_mask")) ublk_dbg_mask = strtol(optarg, NULL, 16); @@ -1361,6 +1609,14 @@ int main(int argc, char *argv[]) ublk_dbg_mask = 0; if (!strcmp(longopts[option_idx].name, "foreground")) ctx.fg = 1; + if (!strcmp(longopts[option_idx].name, "auto_zc")) + ctx.flags |= UBLK_F_AUTO_BUF_REG; + if (!strcmp(longopts[option_idx].name, "auto_zc_fallback")) + ctx.auto_zc_fallback = 1; + if (!strcmp(longopts[option_idx].name, "nthreads")) + ctx.nthreads = strtol(optarg, NULL, 10); + if (!strcmp(longopts[option_idx].name, "per_io_tasks")) + ctx.per_io_tasks = 1; break; case '?': /* @@ -1384,6 +1640,16 @@ int main(int argc, char *argv[]) } } + /* auto_zc_fallback depends on F_AUTO_BUF_REG & F_SUPPORT_ZERO_COPY */ + if (ctx.auto_zc_fallback && + !((ctx.flags & UBLK_F_AUTO_BUF_REG) && + (ctx.flags & UBLK_F_SUPPORT_ZERO_COPY))) { + ublk_err("%s: auto_zc_fallback is set but neither " + "F_AUTO_BUF_REG nor F_SUPPORT_ZERO_COPY is enabled\n", + __func__); + return -EINVAL; + } + i = optind; while (i < argc && ctx.nr_files < MAX_BACK_FILES) { ctx.files[ctx.nr_files++] = argv[i++]; @@ -1416,6 +1682,10 @@ int main(int argc, char *argv[]) ret = cmd_dev_help(argv[0]); else if (!strcmp(cmd, "features")) ret = cmd_dev_get_features(); + else if (!strcmp(cmd, "update_size")) + ret = cmd_dev_update_size(&ctx); + else if (!strcmp(cmd, "quiesce")) + ret = cmd_dev_quiesce(&ctx); else cmd_dev_help(argv[0]); diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h index 29571eb296f1..6be601536b3d 100644 --- a/tools/testing/selftests/ublk/kublk.h +++ 
b/tools/testing/selftests/ublk/kublk.h @@ -19,7 +19,6 @@ #include <sys/inotify.h> #include <sys/wait.h> #include <sys/eventfd.h> -#include <sys/uio.h> #include <sys/ipc.h> #include <sys/shm.h> #include <linux/io_uring.h> @@ -50,11 +49,14 @@ #define UBLKSRV_IO_IDLE_SECS 20 #define UBLK_IO_MAX_BYTES (1 << 20) -#define UBLK_MAX_QUEUES 32 +#define UBLK_MAX_QUEUES_SHIFT 5 +#define UBLK_MAX_QUEUES (1 << UBLK_MAX_QUEUES_SHIFT) +#define UBLK_MAX_THREADS_SHIFT 5 +#define UBLK_MAX_THREADS (1 << UBLK_MAX_THREADS_SHIFT) #define UBLK_QUEUE_DEPTH 1024 #define UBLK_DBG_DEV (1U << 0) -#define UBLK_DBG_QUEUE (1U << 1) +#define UBLK_DBG_THREAD (1U << 1) #define UBLK_DBG_IO_CMD (1U << 2) #define UBLK_DBG_IO (1U << 3) #define UBLK_DBG_CTRL_CMD (1U << 4) @@ -62,6 +64,7 @@ struct ublk_dev; struct ublk_queue; +struct ublk_thread; struct stripe_ctx { /* stripe */ @@ -77,6 +80,7 @@ struct dev_ctx { char tgt_type[16]; unsigned long flags; unsigned nr_hw_queues; + unsigned short nthreads; unsigned queue_depth; int dev_id; int nr_files; @@ -85,9 +89,8 @@ struct dev_ctx { unsigned int all:1; unsigned int fg:1; unsigned int recovery:1; - - /* fault_inject */ - long long delay_us; + unsigned int auto_zc_fallback:1; + unsigned int per_io_tasks:1; int _evtfd; int _shmid; @@ -95,6 +98,9 @@ struct dev_ctx { /* built from shmem, only for ublk_dump_dev() */ struct ublk_dev *shadow_dev; + /* for 'update_size' command */ + unsigned long long size; + union { struct stripe_ctx stripe; struct fault_inject_ctx fault_inject; @@ -118,13 +124,19 @@ struct ublk_io { #define UBLKSRV_NEED_FETCH_RQ (1UL << 0) #define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1) #define UBLKSRV_IO_FREE (1UL << 2) +#define UBLKSRV_NEED_GET_DATA (1UL << 3) +#define UBLKSRV_NEED_REG_BUF (1UL << 4) unsigned short flags; unsigned short refs; /* used by target code only */ + int tag; + int result; + unsigned short buf_index; unsigned short tgt_ios; void *private_data; + struct ublk_thread *t; }; struct ublk_tgt_ops { @@ -143,6 +155,9 @@ struct ublk_tgt_ops { */ void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]); void (*usage)(const struct ublk_tgt_ops *ops); + + /* return buffer index for UBLK_F_AUTO_BUF_REG */ + unsigned short (*buf_index)(const struct ublk_queue *, int tag); }; struct ublk_tgt { @@ -160,26 +175,39 @@ struct ublk_tgt { struct ublk_queue { int q_id; int q_depth; - unsigned int cmd_inflight; - unsigned int io_inflight; struct ublk_dev *dev; const struct ublk_tgt_ops *tgt_ops; struct ublksrv_io_desc *io_cmd_buf; - struct io_uring ring; + struct ublk_io ios[UBLK_QUEUE_DEPTH]; -#define UBLKSRV_QUEUE_STOPPING (1U << 0) -#define UBLKSRV_QUEUE_IDLE (1U << 1) #define UBLKSRV_NO_BUF (1U << 2) #define UBLKSRV_ZC (1U << 3) +#define UBLKSRV_AUTO_BUF_REG (1U << 4) +#define UBLKSRV_AUTO_BUF_REG_FALLBACK (1U << 5) unsigned state; - pid_t tid; +}; + +struct ublk_thread { + struct ublk_dev *dev; + struct io_uring ring; + unsigned int cmd_inflight; + unsigned int io_inflight; + pthread_t thread; + unsigned idx; + +#define UBLKSRV_THREAD_STOPPING (1U << 0) +#define UBLKSRV_THREAD_IDLE (1U << 1) + unsigned state; }; struct ublk_dev { struct ublk_tgt tgt; struct ublksrv_ctrl_dev_info dev_info; struct ublk_queue q[UBLK_MAX_QUEUES]; + struct ublk_thread threads[UBLK_MAX_THREADS]; + unsigned nthreads; + unsigned per_io_tasks; int fds[MAX_BACK_FILES + 1]; /* fds[0] points to /dev/ublkcN */ int nr_fds; @@ -204,7 +232,13 @@ struct ublk_dev { extern unsigned int ublk_dbg_mask; -extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag); 
+extern int ublk_queue_io_cmd(struct ublk_io *io); + + +static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod) +{ + return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF); +} static inline int is_target_io(__u64 user_data) { @@ -212,11 +246,14 @@ static inline int is_target_io(__u64 user_data) } static inline __u64 build_user_data(unsigned tag, unsigned op, - unsigned tgt_data, unsigned is_target_io) + unsigned tgt_data, unsigned q_id, unsigned is_target_io) { - assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16)); + /* we only have 7 bits to encode q_id */ + _Static_assert(UBLK_MAX_QUEUES_SHIFT <= 7); + assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7)); - return tag | (op << 16) | (tgt_data << 24) | (__u64)is_target_io << 63; + return tag | (op << 16) | (tgt_data << 24) | + (__u64)q_id << 56 | (__u64)is_target_io << 63; } static inline unsigned int user_data_to_tag(__u64 user_data) @@ -234,6 +271,11 @@ static inline unsigned int user_data_to_tgt_data(__u64 user_data) return (user_data >> 24) & 0xffff; } +static inline unsigned int user_data_to_q_id(__u64 user_data) +{ + return (user_data >> 56) & 0x7f; +} + static inline unsigned short ublk_cmd_op_nr(unsigned int op) { return _IOC_NR(op); @@ -267,17 +309,23 @@ static inline void ublk_dbg(int level, const char *fmt, ...) } } -static inline int ublk_queue_alloc_sqes(struct ublk_queue *q, +static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io) +{ + return container_of(io, struct ublk_queue, ios[io->tag]); +} + +static inline int ublk_io_alloc_sqes(struct ublk_io *io, struct io_uring_sqe *sqes[], int nr_sqes) { - unsigned left = io_uring_sq_space_left(&q->ring); + struct io_uring *ring = &io->t->ring; + unsigned left = io_uring_sq_space_left(ring); int i; if (left < nr_sqes) - io_uring_submit(&q->ring); + io_uring_submit(ring); for (i = 0; i < nr_sqes; i++) { - sqes[i] = io_uring_get_sqe(&q->ring); + sqes[i] = io_uring_get_sqe(ring); if (!sqes[i]) return i; } @@ -360,7 +408,7 @@ static inline int ublk_complete_io(struct ublk_queue *q, unsigned tag, int res) ublk_mark_io_done(io, res); - return ublk_queue_io_cmd(q, io, tag); + return ublk_queue_io_cmd(io); } static inline void ublk_queued_tgt_io(struct ublk_queue *q, unsigned tag, int queued) @@ -370,7 +418,7 @@ static inline void ublk_queued_tgt_io(struct ublk_queue *q, unsigned tag, int qu else { struct ublk_io *io = ublk_get_io(q, tag); - q->io_inflight += queued; + io->t->io_inflight += queued; io->tgt_ios = queued; io->result = 0; } @@ -380,7 +428,7 @@ static inline int ublk_completed_tgt_io(struct ublk_queue *q, unsigned tag) { struct ublk_io *io = ublk_get_io(q, tag); - q->io_inflight--; + io->t->io_inflight--; return --io->tgt_ios == 0; } @@ -390,6 +438,11 @@ static inline int ublk_queue_use_zc(const struct ublk_queue *q) return q->state & UBLKSRV_ZC; } +static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q) +{ + return q->state & UBLKSRV_AUTO_BUF_REG; +} + extern const struct ublk_tgt_ops null_tgt_ops; extern const struct ublk_tgt_ops loop_tgt_ops; extern const struct ublk_tgt_ops stripe_tgt_ops; diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c index 91fec3690d4b..afe0b99d77ee 100644 --- a/tools/testing/selftests/ublk/null.c +++ b/tools/testing/selftests/ublk/null.c @@ -42,33 +42,51 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) return 0; } +static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod, + struct io_uring_sqe 
*sqe, int q_id) +{ + unsigned ublk_op = ublksrv_get_op(iod); + + io_uring_prep_nop(sqe); + sqe->buf_index = tag; + sqe->flags |= IOSQE_FIXED_FILE; + sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT; + sqe->len = iod->nr_sectors << 9; /* injected result */ + sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1); +} + static int null_queue_zc_io(struct ublk_queue *q, int tag) { const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); - unsigned ublk_op = ublksrv_get_op(iod); struct io_uring_sqe *sqe[3]; - ublk_queue_alloc_sqes(q, sqe, 3); + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3); - io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag); + io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); sqe[0]->user_data = build_user_data(tag, - ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1); + ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; - io_uring_prep_nop(sqe[1]); - sqe[1]->buf_index = tag; - sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK; - sqe[1]->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT; - sqe[1]->len = iod->nr_sectors << 9; /* injected result */ - sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1); + __setup_nop_io(tag, iod, sqe[1], q->q_id); + sqe[1]->flags |= IOSQE_IO_HARDLINK; - io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag); - sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1); + io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index); + sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); // buf register is marked as IOSQE_CQE_SKIP_SUCCESS return 2; } +static int null_queue_auto_zc_io(struct ublk_queue *q, int tag) +{ + const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); + struct io_uring_sqe *sqe[1]; + + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1); + __setup_nop_io(tag, iod, sqe[0], q->q_id); + return 1; +} + static void ublk_null_io_done(struct ublk_queue *q, int tag, const struct io_uring_cqe *cqe) { @@ -94,22 +112,37 @@ static void ublk_null_io_done(struct ublk_queue *q, int tag, static int ublk_null_queue_io(struct ublk_queue *q, int tag) { const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); - int zc = ublk_queue_use_zc(q); + unsigned auto_zc = ublk_queue_use_auto_zc(q); + unsigned zc = ublk_queue_use_zc(q); int queued; - if (!zc) { + if (auto_zc && !ublk_io_auto_zc_fallback(iod)) + queued = null_queue_auto_zc_io(q, tag); + else if (zc) + queued = null_queue_zc_io(q, tag); + else { ublk_complete_io(q, tag, iod->nr_sectors << 9); return 0; } - - queued = null_queue_zc_io(q, tag); ublk_queued_tgt_io(q, tag, queued); return 0; } +/* + * return invalid buffer index for triggering auto buffer register failure, + * then UBLK_IO_RES_NEED_REG_BUF handling is covered + */ +static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag) +{ + if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK) + return (unsigned short)-1; + return q->ios[tag].buf_index; +} + const struct ublk_tgt_ops null_tgt_ops = { .name = "null", .init_tgt = ublk_null_tgt_init, .queue_io = ublk_null_queue_io, .tgt_io_done = ublk_null_io_done, + .buf_index = ublk_null_buf_index, }; diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c index 5dbd6392d83d..37d50bbf5f5e 100644 --- a/tools/testing/selftests/ublk/stripe.c +++ b/tools/testing/selftests/ublk/stripe.c @@ -70,7 +70,7 @@ static void 
free_stripe_array(struct stripe_array *s) } static void calculate_stripe_array(const struct stripe_conf *conf, - const struct ublksrv_io_desc *iod, struct stripe_array *s) + const struct ublksrv_io_desc *iod, struct stripe_array *s, void *base) { const unsigned shift = conf->shift - 9; const unsigned chunk_sects = 1 << shift; @@ -102,7 +102,7 @@ static void calculate_stripe_array(const struct stripe_conf *conf, } assert(this->nr_vec < this->cap); - this->vec[this->nr_vec].iov_base = (void *)(iod->addr + done); + this->vec[this->nr_vec].iov_base = (void *)(base + done); this->vec[this->nr_vec++].iov_len = nr_sects << 9; start += nr_sects; @@ -126,23 +126,25 @@ static inline enum io_uring_op stripe_to_uring_op( static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) { const struct stripe_conf *conf = get_chunk_shift(q); - int zc = !!(ublk_queue_use_zc(q) != 0); - enum io_uring_op op = stripe_to_uring_op(iod, zc); + unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0); + unsigned zc = (ublk_queue_use_zc(q) != 0); + enum io_uring_op op = stripe_to_uring_op(iod, zc | auto_zc); struct io_uring_sqe *sqe[NR_STRIPE]; struct stripe_array *s = alloc_stripe_array(conf, iod); struct ublk_io *io = ublk_get_io(q, tag); int i, extra = zc ? 2 : 0; + void *base = (zc | auto_zc) ? NULL : (void *)iod->addr; io->private_data = s; - calculate_stripe_array(conf, iod, s); + calculate_stripe_array(conf, iod, s, base); - ublk_queue_alloc_sqes(q, sqe, s->nr + extra); + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, s->nr + extra); if (zc) { - io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag); + io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index); sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; sqe[0]->user_data = build_user_data(tag, - ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1); + ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); } for (i = zc; i < s->nr + extra - zc; i++) { @@ -153,21 +155,21 @@ static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_ (void *)t->vec, t->nr_vec, t->start << 9); - if (zc) { + io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); + if (auto_zc || zc) { sqe[i]->buf_index = tag; - io_uring_sqe_set_flags(sqe[i], - IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK); - } else { - io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); + if (zc) + sqe[i]->flags |= IOSQE_IO_HARDLINK; } /* bit63 marks us as tgt io */ - sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1); + sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, q->q_id, 1); } if (zc) { struct io_uring_sqe *unreg = sqe[s->nr + 1]; - io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag); - unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1); + io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, io->buf_index); + unreg->user_data = build_user_data( + tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1); } /* register buffer is skip_success */ @@ -180,11 +182,11 @@ static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, struct io_uring_sqe *sqe[NR_STRIPE]; int i; - ublk_queue_alloc_sqes(q, sqe, conf->nr_files); + ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files); for (i = 0; i < conf->nr_files; i++) { io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC); io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); - sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, 1); + sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, q->q_id, 1); } return 
conf->nr_files; } @@ -287,6 +289,11 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) loff_t bytes = 0; int ret, i, mul = 1; + if (ctx->auto_zc_fallback) { + ublk_err("%s: not support auto_zc_fallback\n", __func__); + return -EINVAL; + } + if ((chunk_size & (chunk_size - 1)) || !chunk_size) { ublk_err("invalid chunk size %u\n", chunk_size); return -EINVAL; diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh index 9fc111f64576..8a4dbd09feb0 100755 --- a/tools/testing/selftests/ublk/test_common.sh +++ b/tools/testing/selftests/ublk/test_common.sh @@ -17,12 +17,17 @@ _get_disk_dev_t() { local minor dev=/dev/ublkb"${dev_id}" - major=$(stat -c '%Hr' "$dev") - minor=$(stat -c '%Lr' "$dev") + major="0x"$(stat -c '%t' "$dev") + minor="0x"$(stat -c '%T' "$dev") echo $(( (major & 0xfff) << 20 | (minor & 0xfffff) )) } +_get_disk_size() +{ + lsblk -b -o SIZE -n "$1" +} + _run_fio_verify_io() { fio --name=verify --rw=randwrite --direct=1 --ioengine=libaio \ --bs=8k --iodepth=32 --verify=crc32c --do_verify=1 \ @@ -215,6 +220,26 @@ _recover_ublk_dev() { echo "$state" } +# quiesce device and return ublk device state +__ublk_quiesce_dev() +{ + local dev_id=$1 + local exp_state=$2 + local state + + if ! ${UBLK_PROG} quiesce -n "${dev_id}"; then + state=$(_get_ublk_dev_state "${dev_id}") + return "$state" + fi + + for ((j=0;j<50;j++)); do + state=$(_get_ublk_dev_state "${dev_id}") + [ "$state" == "$exp_state" ] && break + sleep 1 + done + echo "$state" +} + # kill the ublk daemon and return ublk device state __ublk_kill_daemon() { @@ -251,8 +276,13 @@ __run_io_and_remove() local kill_server=$3 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \ - --rw=readwrite --iodepth=256 --size="${size}" --numjobs=4 \ + --rw=randrw --norandommap --iodepth=256 --size="${size}" --numjobs="$(nproc)" \ --runtime=20 --time_based > /dev/null 2>&1 & + fio --name=batchjob --filename=/dev/ublkb"${dev_id}" --ioengine=io_uring \ + --rw=randrw --norandommap --iodepth=256 --size="${size}" \ + --numjobs="$(nproc)" --runtime=20 --time_based \ + --iodepth_batch_submit=32 --iodepth_batch_complete_min=32 \ + --force_async=7 > /dev/null 2>&1 & sleep 2 if [ "${kill_server}" = "yes" ]; then local state @@ -303,20 +333,26 @@ run_io_and_kill_daemon() run_io_and_recover() { + local action=$1 local state local dev_id + shift 1 dev_id=$(_add_ublk_dev "$@") _check_add_dev "$TID" $? fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \ - --rw=readwrite --iodepth=256 --size="${size}" --numjobs=4 \ + --rw=randread --iodepth=256 --size="${size}" --numjobs=4 \ --runtime=20 --time_based > /dev/null 2>&1 & sleep 4 - state=$(__ublk_kill_daemon "${dev_id}" "QUIESCED") + if [ "$action" == "kill_daemon" ]; then + state=$(__ublk_kill_daemon "${dev_id}" "QUIESCED") + elif [ "$action" == "quiesce_dev" ]; then + state=$(__ublk_quiesce_dev "${dev_id}" "QUIESCED") + fi if [ "$state" != "QUIESCED" ]; then - echo "device isn't quiesced($state) after killing daemon" + echo "device isn't quiesced($state) after $action" return 255 fi diff --git a/tools/testing/selftests/ublk/test_generic_04.sh b/tools/testing/selftests/ublk/test_generic_04.sh index 8a3bc080c577..8b533217d4a1 100755 --- a/tools/testing/selftests/ublk/test_generic_04.sh +++ b/tools/testing/selftests/ublk/test_generic_04.sh @@ -8,7 +8,7 @@ ERR_CODE=0 ublk_run_recover_test() { - run_io_and_recover "$@" + run_io_and_recover "kill_daemon" "$@" ERR_CODE=$? 
if [ ${ERR_CODE} -ne 0 ]; then echo "$TID failure: $*" diff --git a/tools/testing/selftests/ublk/test_generic_05.sh b/tools/testing/selftests/ublk/test_generic_05.sh index 714630b4b329..398e9e2b58e1 100755 --- a/tools/testing/selftests/ublk/test_generic_05.sh +++ b/tools/testing/selftests/ublk/test_generic_05.sh @@ -3,12 +3,12 @@ . "$(cd "$(dirname "$0")" && pwd)"/test_common.sh -TID="generic_04" +TID="generic_05" ERR_CODE=0 ublk_run_recover_test() { - run_io_and_recover "$@" + run_io_and_recover "kill_daemon" "$@" ERR_CODE=$? if [ ${ERR_CODE} -ne 0 ]; then echo "$TID failure: $*" diff --git a/tools/testing/selftests/ublk/test_generic_06.sh b/tools/testing/selftests/ublk/test_generic_06.sh index b67230c42c84..fd42062b7b76 100755 --- a/tools/testing/selftests/ublk/test_generic_06.sh +++ b/tools/testing/selftests/ublk/test_generic_06.sh @@ -17,7 +17,7 @@ STARTTIME=${SECONDS} dd if=/dev/urandom of=/dev/ublkb${dev_id} oflag=direct bs=4k count=1 status=none > /dev/null 2>&1 & dd_pid=$! -__ublk_kill_daemon ${dev_id} "DEAD" +__ublk_kill_daemon ${dev_id} "DEAD" >/dev/null wait $dd_pid dd_exitcode=$? diff --git a/tools/testing/selftests/ublk/test_generic_07.sh b/tools/testing/selftests/ublk/test_generic_07.sh new file mode 100755 index 000000000000..cba86451fa5e --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_07.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_07" +ERR_CODE=0 + +if ! _have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "generic" "test UBLK_F_NEED_GET_DATA" + +_create_backfile 0 256M +dev_id=$(_add_ublk_dev -t loop -q 2 -g "${UBLK_BACKFILES[0]}") +_check_add_dev $TID $? + +# run fio over the ublk disk +_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M +ERR_CODE=$? +if [ "$ERR_CODE" -eq 0 ]; then + _mkfs_mount_test /dev/ublkb"${dev_id}" + ERR_CODE=$? +fi + +_cleanup_test "generic" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_08.sh b/tools/testing/selftests/ublk/test_generic_08.sh new file mode 100755 index 000000000000..b222f3a77e12 --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_08.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_08" +ERR_CODE=0 + +if ! _have_feature "AUTO_BUF_REG"; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "generic" "test UBLK_F_AUTO_BUF_REG" + +_create_backfile 0 256M +_create_backfile 1 256M + +dev_id=$(_add_ublk_dev -t loop -q 2 --auto_zc "${UBLK_BACKFILES[0]}") +_check_add_dev $TID $? + +if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then + _cleanup_test "generic" + _show_result $TID 255 +fi + +dev_id=$(_add_ublk_dev -t stripe --auto_zc "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}") +_check_add_dev $TID $? +_mkfs_mount_test /dev/ublkb"${dev_id}" +ERR_CODE=$? + +_cleanup_test "generic" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_09.sh b/tools/testing/selftests/ublk/test_generic_09.sh new file mode 100755 index 000000000000..bb6f77ca5522 --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_09.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_09" +ERR_CODE=0 + +if ! _have_feature "AUTO_BUF_REG"; then + exit "$UBLK_SKIP_CODE" +fi + +if ! 
_have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "null" "basic IO test" + +dev_id=$(_add_ublk_dev -t null -z --auto_zc --auto_zc_fallback) +_check_add_dev $TID $? + +# run fio over the two disks +fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 +ERR_CODE=$? + +_cleanup_test "null" + +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_10.sh b/tools/testing/selftests/ublk/test_generic_10.sh new file mode 100755 index 000000000000..abc11c3d416b --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_10.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_10" +ERR_CODE=0 + +if ! _have_feature "UPDATE_SIZE"; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "null" "check update size" + +dev_id=$(_add_ublk_dev -t null) +_check_add_dev $TID $? + +size=$(_get_disk_size /dev/ublkb"${dev_id}") +size=$(( size / 2 )) +if ! "$UBLK_PROG" update_size -n "$dev_id" -s "$size"; then + ERR_CODE=255 +fi + +new_size=$(_get_disk_size /dev/ublkb"${dev_id}") +if [ "$new_size" != "$size" ]; then + ERR_CODE=255 +fi + +_cleanup_test "null" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_11.sh b/tools/testing/selftests/ublk/test_generic_11.sh new file mode 100755 index 000000000000..a00357a5ec6b --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_11.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_11" +ERR_CODE=0 + +ublk_run_quiesce_recover() +{ + run_io_and_recover "quiesce_dev" "$@" + ERR_CODE=$? + if [ ${ERR_CODE} -ne 0 ]; then + echo "$TID failure: $*" + _show_result $TID $ERR_CODE + fi +} + +if ! _have_feature "QUIESCE"; then + exit "$UBLK_SKIP_CODE" +fi + +if ! _have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "quiesce" "basic quiesce & recover function verification" + +_create_backfile 0 256M +_create_backfile 1 128M +_create_backfile 2 128M + +ublk_run_quiesce_recover -t null -q 2 -r 1 & +ublk_run_quiesce_recover -t loop -q 2 -r 1 "${UBLK_BACKFILES[0]}" & +ublk_run_quiesce_recover -t stripe -q 2 -r 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & +wait + +ublk_run_quiesce_recover -t null -q 2 -r 1 -i 1 & +ublk_run_quiesce_recover -t loop -q 2 -r 1 -i 1 "${UBLK_BACKFILES[0]}" & +ublk_run_quiesce_recover -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & +wait + +_cleanup_test "quiesce" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_12.sh b/tools/testing/selftests/ublk/test_generic_12.sh new file mode 100755 index 000000000000..7abbb00d251d --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_12.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_12" +ERR_CODE=0 + +if ! _have_program bpftrace; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "null" "do imbalanced load, it should be balanced over I/O threads" + +NTHREADS=6 +dev_id=$(_add_ublk_dev -t null -q 4 -d 16 --nthreads $NTHREADS --per_io_tasks) +_check_add_dev $TID $? + +dev_t=$(_get_disk_dev_t "$dev_id") +bpftrace trace/count_ios_per_tid.bt "$dev_t" > "$UBLK_TMP" 2>&1 & +btrace_pid=$! +sleep 2 + +if ! 
kill -0 "$btrace_pid" > /dev/null 2>&1; then + _cleanup_test "null" + exit "$UBLK_SKIP_CODE" +fi + +# do imbalanced I/O on the ublk device +# pin to cpu 0 to prevent migration/only target one queue +fio --name=write_seq \ + --filename=/dev/ublkb"${dev_id}" \ + --ioengine=libaio --iodepth=16 \ + --rw=write \ + --size=512M \ + --direct=1 \ + --bs=4k \ + --cpus_allowed=0 > /dev/null 2>&1 +ERR_CODE=$? +kill "$btrace_pid" +wait + +# check that every task handles some I/O, even though all I/O was issued +# from a single CPU. when ublk gets support for round-robin tag +# allocation, this check can be strengthened to assert that every thread +# handles the same number of I/Os +NR_THREADS_THAT_HANDLED_IO=$(grep -c '@' ${UBLK_TMP}) +if [[ $NR_THREADS_THAT_HANDLED_IO -ne $NTHREADS ]]; then + echo "only $NR_THREADS_THAT_HANDLED_IO handled I/O! expected $NTHREADS" + cat "$UBLK_TMP" + ERR_CODE=255 +fi + +_cleanup_test "null" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh index 1a9065125ae1..4bdd921081e5 100755 --- a/tools/testing/selftests/ublk/test_stress_02.sh +++ b/tools/testing/selftests/ublk/test_stress_02.sh @@ -25,10 +25,12 @@ _create_backfile 0 256M _create_backfile 1 128M _create_backfile 2 128M -ublk_io_and_kill_daemon 8G -t null -q 4 & -ublk_io_and_kill_daemon 256M -t loop -q 4 "${UBLK_BACKFILES[0]}" & -ublk_io_and_kill_daemon 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & -wait +for nr_queue in 1 4; do + ublk_io_and_kill_daemon 8G -t null -q "$nr_queue" & + ublk_io_and_kill_daemon 256M -t loop -q "$nr_queue" "${UBLK_BACKFILES[0]}" & + ublk_io_and_kill_daemon 256M -t stripe -q "$nr_queue" "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + wait +done _cleanup_test "stress" _show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_03.sh b/tools/testing/selftests/ublk/test_stress_03.sh index e0854f71d35b..3ed4c9b2d8c0 100755 --- a/tools/testing/selftests/ublk/test_stress_03.sh +++ b/tools/testing/selftests/ublk/test_stress_03.sh @@ -34,5 +34,21 @@ ublk_io_and_remove 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" & ublk_io_and_remove 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & wait +if _have_feature "AUTO_BUF_REG"; then + ublk_io_and_remove 8G -t null -q 4 --auto_zc & + ublk_io_and_remove 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback & + wait +fi + +if _have_feature "PER_IO_DAEMON"; then + ublk_io_and_remove 8G -t null -q 4 --auto_zc --nthreads 8 --per_io_tasks & + ublk_io_and_remove 256M -t loop -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 256M -t stripe -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback --nthreads 8 --per_io_tasks & + wait +fi + _cleanup_test "stress" _show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh index 1798a98387e8..40d1437ca298 100755 --- a/tools/testing/selftests/ublk/test_stress_04.sh +++ b/tools/testing/selftests/ublk/test_stress_04.sh @@ -31,6 +31,20 @@ _create_backfile 2 128M ublk_io_and_kill_daemon 8G -t null -q 4 -z & ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" & 
ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + +if _have_feature "AUTO_BUF_REG"; then + ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc & + ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" & + ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback & +fi + +if _have_feature "PER_IO_DAEMON"; then + ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks & + ublk_io_and_kill_daemon 256M -t loop -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" & + ublk_io_and_kill_daemon 256M -t stripe -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks & +fi wait _cleanup_test "stress" diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh index a7071b10224d..566cfd90d192 100755 --- a/tools/testing/selftests/ublk/test_stress_05.sh +++ b/tools/testing/selftests/ublk/test_stress_05.sh @@ -47,18 +47,34 @@ _create_backfile 0 256M _create_backfile 1 256M for reissue in $(seq 0 1); do - ublk_io_and_remove 8G -t null -q 4 -g 1 -r 1 -i "$reissue" & - ublk_io_and_remove 256M -t loop -q 4 -g 1 -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 8G -t null -q 4 -g -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & wait done if _have_feature "ZERO_COPY"; then for reissue in $(seq 0 1); do - ublk_io_and_remove 8G -t null -q 4 -g 1 -z -r 1 -i "$reissue" & - ublk_io_and_remove 256M -t loop -q 4 -g 1 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & + ublk_io_and_remove 8G -t null -q 4 -g -z -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & wait done fi +if _have_feature "AUTO_BUF_REG"; then + for reissue in $(seq 0 1); do + ublk_io_and_remove 8G -t null -q 4 -g --auto_zc -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g --auto_zc -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & + ublk_io_and_remove 8G -t null -q 4 -g -z --auto_zc --auto_zc_fallback -r 1 -i "$reissue" & + wait + done +fi + +if _have_feature "PER_IO_DAEMON"; then + ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" & +fi +wait + _cleanup_test "stress" _show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt b/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt new file mode 100644 index 000000000000..f4aa63ff2938 --- /dev/null +++ b/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt @@ -0,0 +1,11 @@ +/* + * Tabulates and prints I/O completions per thread for the given device + * + * $1: dev_t +*/ +tracepoint:block:block_rq_complete +{ + if (args.dev == $1) { + @[tid] = count(); + } +} |
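The count_ios_per_tid.bt script above tallies block I/O completions per thread id; test_generic_12.sh relies on it to check that load aimed at a single queue is still spread over all --nthreads server threads when --per_io_tasks is used. Below is a minimal sketch of the (q_id, tag) to thread assignment described in the ublk_submit_fetch_commands() comment, with purely illustrative sizes:

#include <stdio.h>

/*
 * Per-io-task assignment as described in ublk_submit_fetch_commands():
 * order all (q_id, tag) pairs lexicographically, then give every Nth
 * entry (N = nthreads) to the same thread, starting at that thread's
 * index.  The sizes below are examples only.
 */
int main(void)
{
	unsigned nr_queues = 4, queue_depth = 4, nthreads = 6;
	unsigned nr_ios = nr_queues * queue_depth;

	for (unsigned t = 0; t < nthreads; t++) {
		printf("thread %u:", t);
		for (unsigned i = t; i < nr_ios; i += nthreads)
			printf(" (q=%u, tag=%u)", i / queue_depth, i % queue_depth);
		printf("\n");
	}
	return 0;
}

With nthreads larger than nr_queues, every queue ends up with tags owned by several threads, which is why the test can pin fio to CPU 0 (a single queue) and still expect completions on all of the server threads.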
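For the UBLK_F_AUTO_BUF_REG paths exercised by test_generic_08/09 and the stress tests, the daemon no longer issues explicit buf_register/buf_unregister SQEs; the fetch/commit command itself carries an ublk_auto_buf_reg request, as in ublk_set_auto_buf_reg() in the kublk.c hunk. A hedged sketch of that setup follows; struct ublk_auto_buf_reg, UBLK_AUTO_BUF_REG_FALLBACK and ublk_auto_buf_reg_to_sqe_addr() are assumed to come from the ublk UAPI header, which the diff uses but does not show.

#include <linux/ublk_cmd.h>
#include <liburing.h>

/* mirror of ublk_set_auto_buf_reg(): tell the kernel which fixed-buffer
 * slot to auto-register for this I/O, optionally allowing fallback */
static void set_auto_buf_reg(struct io_uring_sqe *sqe,
			     unsigned short buf_index, int want_fallback)
{
	struct ublk_auto_buf_reg buf = {
		.index = buf_index,	/* slot in the serving thread's ring */
	};

	if (want_fallback)
		/* if registration fails, deliver the I/O with
		 * UBLK_IO_F_NEED_REG_BUF set instead of erroring */
		buf.flags = UBLK_AUTO_BUF_REG_FALLBACK;

	/* the io_uring cmd carries the request in sqe->addr */
	sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf);
}

The null target triggers the fallback path deliberately: when UBLKSRV_AUTO_BUF_REG_FALLBACK is set, its ->buf_index() hook returns an out-of-range index, registration fails, and the iod arrives with UBLK_IO_F_NEED_REG_BUF, so ublk_null_queue_io() skips the auto-registered path via ublk_io_auto_zc_fallback().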