Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/common/auth.c               |   4
-rw-r--r--  drivers/nvme/host/apple.c                |   1
-rw-r--r--  drivers/nvme/host/auth.c                 |   8
-rw-r--r--  drivers/nvme/host/core.c                 |  18
-rw-r--r--  drivers/nvme/host/fabrics.c              |   2
-rw-r--r--  drivers/nvme/host/fabrics.h              |   6
-rw-r--r--  drivers/nvme/host/fc.c                   |  24
-rw-r--r--  drivers/nvme/host/ioctl.c                |   9
-rw-r--r--  drivers/nvme/host/multipath.c            |  12
-rw-r--r--  drivers/nvme/host/nvme.h                 |   9
-rw-r--r--  drivers/nvme/host/pci.c                  | 133
-rw-r--r--  drivers/nvme/host/pr.c                   |   6
-rw-r--r--  drivers/nvme/host/rdma.c                 |   1
-rw-r--r--  drivers/nvme/host/tcp.c                  |   8
-rw-r--r--  drivers/nvme/host/zns.c                  |  10
-rw-r--r--  drivers/nvme/target/admin-cmd.c          |   2
-rw-r--r--  drivers/nvme/target/auth.c               |  21
-rw-r--r--  drivers/nvme/target/core.c               |   5
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c   |   1
-rw-r--r--  drivers/nvme/target/fc.c                 |  48
-rw-r--r--  drivers/nvme/target/fcloop.c             |   9
-rw-r--r--  drivers/nvme/target/loop.c               |   1
-rw-r--r--  drivers/nvme/target/nvmet.h              |   2
-rw-r--r--  drivers/nvme/target/passthru.c           |   2
-rw-r--r--  drivers/nvme/target/pci-epf.c            |  14
-rw-r--r--  drivers/nvme/target/rdma.c               |  12
-rw-r--r--  drivers/nvme/target/tcp.c                |   8
27 files changed, 253 insertions, 123 deletions
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 1f51fbebd9fa..e07e7d4bf8b6 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -178,7 +178,7 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
if (!key)
return ERR_PTR(-ENOMEM);
- key_len = base64_decode(secret, allocated_len, key->key);
+ key_len = base64_decode(secret, allocated_len, key->key, true, BASE64_STD);
if (key_len < 0) {
pr_debug("base64 key decoding error %d\n",
key_len);
@@ -663,7 +663,7 @@ int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_digest;
- ret = base64_encode(digest, digest_len, enc);
+ ret = base64_encode(digest, digest_len, enc, true, BASE64_STD);
if (ret < hmac_len) {
ret = -ENOKEY;
goto out_free_digest;
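
For context, the lib/base64 interface used above gains two arguments in this series. A minimal caller sketch, assuming (per the hunks above) that the bool selects padded encoding and BASE64_STD selects the standard RFC 4648 alphabet; demo_decode_secret is a hypothetical name:

	/*
	 * Sketch only: decode a base64 secret with the extended API as
	 * called from nvme_auth_extract_key() above. "true" requests
	 * padded base64; BASE64_STD picks the standard alphabet.
	 */
	static int demo_decode_secret(const char *secret, int len, u8 *dst)
	{
		/* Returns the decoded byte count, or a negative value on
		 * malformed input, mirroring the error check above. */
		return base64_decode(secret, len, dst, true, BASE64_STD);
	}
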
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index f35d3f71d14f..15b3d07f8ccd 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1283,6 +1283,7 @@ static const struct nvme_ctrl_ops nvme_ctrl_ops = {
.reg_read64 = apple_nvme_reg_read64,
.free_ctrl = apple_nvme_free_ctrl,
.get_address = apple_nvme_get_address,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 012fcfc79a73..8f3ccb317e4d 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -36,6 +36,7 @@ struct nvme_dhchap_queue_context {
u8 status;
u8 dhgroup_id;
u8 hash_id;
+ u8 sc_c;
size_t hash_len;
u8 c1[64];
u8 c2[64];
@@ -154,6 +155,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+ chap->sc_c = data->sc_c;
+
return size;
}
@@ -489,7 +492,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
- memset(buf, 0, sizeof(buf));
+ *buf = chap->sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
@@ -500,6 +503,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
strlen(ctrl->opts->host->nqn));
if (ret)
goto out;
+ memset(buf, 0, sizeof(buf));
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
@@ -1118,7 +1122,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
if (ctrl->dhchap_ctxs) {
for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
- kfree(ctrl->dhchap_ctxs);
+ kvfree(ctrl->dhchap_ctxs);
}
if (ctrl->host_key) {
nvme_auth_free_key(ctrl->host_key);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa4181d7de73..7bf228df6001 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2069,13 +2069,13 @@ static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
}
static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
- struct queue_limits *lim)
+ struct queue_limits *lim, bool is_admin)
{
lim->max_hw_sectors = ctrl->max_hw_sectors;
lim->max_segments = min_t(u32, USHRT_MAX,
min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
lim->max_integrity_segments = ctrl->max_integrity_segments;
- lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+ lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin);
lim->max_segment_size = UINT_MAX;
lim->dma_alignment = 3;
}
@@ -2177,7 +2177,7 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
int ret;
lim = queue_limits_start_update(ns->disk->queue);
- nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_set_ctrl_limits(ns->ctrl, &lim, false);
memflags = blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
@@ -2381,7 +2381,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
- nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_set_ctrl_limits(ns->ctrl, &lim, false);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
@@ -2599,10 +2599,9 @@ static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
- return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
- data);
+ return nvme_ns_report_zones(disk->private_data, sector, nr_zones, args);
}
#else
#define nvme_report_zones NULL
@@ -3589,7 +3588,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
lim = queue_limits_start_update(ctrl->admin_q);
- nvme_set_ctrl_limits(ctrl, &lim);
+ nvme_set_ctrl_limits(ctrl, &lim, true);
ret = queue_limits_commit_update(ctrl->admin_q, &lim);
if (ret)
goto out_free;
@@ -4901,7 +4900,6 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
*/
nvme_stop_keep_alive(ctrl);
blk_mq_destroy_queue(ctrl->admin_q);
- blk_put_queue(ctrl->admin_q);
if (ctrl->ops->flags & NVME_F_FABRICS) {
blk_mq_destroy_queue(ctrl->fabrics_q);
blk_put_queue(ctrl->fabrics_q);
@@ -5045,6 +5043,8 @@ static void nvme_free_ctrl(struct device *dev)
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
+ if (ctrl->admin_q)
+ blk_put_queue(ctrl->admin_q);
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
nvme_free_cels(ctrl);
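
The new get_virt_boundary ctrl op lets each transport pick the virt_boundary_mask per queue type instead of core.c hard-coding NVME_CTRL_PAGE_SIZE - 1. A sketch of such a callback, following the PCI implementation added later in this diff (demo_get_virt_boundary is a hypothetical name):

	/*
	 * Sketch of a transport's get_virt_boundary() callback: admin
	 * commands always use PRPs per the NVMe spec, and PRPs cannot
	 * describe page gaps, so those queues keep the page-size mask.
	 * SGL-capable I/O queues can accept gappy bios (mask 0).
	 */
	static unsigned long demo_get_virt_boundary(struct nvme_ctrl *ctrl,
						    bool is_admin)
	{
		if (is_admin || !nvme_ctrl_sgl_supported(ctrl))
			return NVME_CTRL_PAGE_SIZE - 1;
		return 0;
	}
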
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2e58a7ce1090..55a8afd2efd5 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -592,7 +592,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
if (status > 0 && (status & NVME_STATUS_DNR))
return false;
- if (status == -EKEYREJECTED)
+ if (status == -EKEYREJECTED || status == -ENOKEY)
return false;
if (ctrl->opts->max_reconnects == -1 ||
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 1b58ee7d0dce..caf5503d0833 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -217,6 +217,12 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
min(opts->nr_poll_queues, num_online_cpus());
}
+static inline unsigned long nvmf_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ return 0;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 03987f497a5b..bc455fa98246 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -520,6 +520,8 @@ nvme_fc_free_rport(struct kref *ref)
WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
WARN_ON(!list_empty(&rport->ctrl_list));
+ WARN_ON(!list_empty(&rport->ls_req_list));
+ WARN_ON(!list_empty(&rport->ls_rcv_list));
/* remove from lport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
@@ -1468,14 +1470,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
{
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc;
- struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL;
struct nvmefc_ls_rcv_op *oldls = NULL;
u64 association_id = be64_to_cpu(rqst->associd.association_id);
unsigned long flags;
spin_lock_irqsave(&rport->lock, flags);
- list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) {
if (!nvme_fc_ctrl_get(ctrl))
continue;
spin_lock(&ctrl->lock);
@@ -1488,7 +1490,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
if (ret)
/* leave the ctrl get reference */
break;
+ spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_ctrl_put(ctrl);
+ spin_lock_irqsave(&rport->lock, flags);
}
spin_unlock_irqrestore(&rport->lock, flags);
@@ -2355,17 +2359,11 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
- if (ctrl->ctrl.tagset)
- nvme_remove_io_tag_set(&ctrl->ctrl);
-
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
- nvme_remove_admin_tag_set(&ctrl->ctrl);
-
kfree(ctrl->queues);
put_device(ctrl->dev);
@@ -3259,13 +3257,20 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
+
/*
* kill the association on the link side. this will block
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
+
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void
@@ -3360,6 +3365,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
+ .get_virt_boundary = nvmf_get_virt_boundary,
};
static void
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index c212fa952c0f..a9c097dacad6 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -398,14 +398,15 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
- unsigned issue_flags)
+static void nvme_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
+ io_uring_cmd_done32(ioucmd, pdu->status, pdu->result,
+ IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -446,7 +447,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct iov_iter iter;
struct iov_iter *map_iter = NULL;
struct request *req;
- blk_opf_t rq_flags = REQ_ALLOC_CACHE;
+ blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
int ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 3da980dc60d9..174027d1cc19 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,12 +182,14 @@ void nvme_mpath_start_request(struct request *rq)
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns->head->disk;
- if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+ if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+ !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
atomic_inc(&ns->ctrl->nr_active);
nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
}
- if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+ if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+ (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
@@ -574,7 +576,7 @@ static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct nvme_ns_head *head = disk->private_data;
struct nvme_ns *ns;
@@ -583,7 +585,7 @@ static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (ns)
- ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, args);
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
@@ -791,7 +793,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
- kblockd_schedule_work(&head->partition_scan_work);
+ queue_work(nvme_wq, &head->partition_scan_work);
}
nvme_mpath_add_sysfs_link(ns->head);
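
The multipath hunks above make nvme_mpath_start_request() safe to run twice on the same request (e.g. after a requeue) by guarding each side effect with a per-request flag. A condensed sketch of the pattern for the queue-depth accounting (demo_count_active is a hypothetical name):

	/*
	 * Sketch: a retried request re-enters the start path, so the
	 * counter increment is applied at most once per request.
	 */
	static void demo_count_active(struct request *rq, atomic_t *nr_active)
	{
		if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
			return;		/* already counted on a prior attempt */
		atomic_inc(nr_active);
		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
	}
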
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 102fae6a231c..9a5f28c5103c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -558,6 +558,12 @@ static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
return head->pi_type && head->ms == head->pi_size;
}
+static inline unsigned long nvme_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ return NVME_CTRL_PAGE_SIZE - 1;
+}
+
struct nvme_ctrl_ops {
const char *name;
struct module *module;
@@ -578,6 +584,7 @@ struct nvme_ctrl_ops {
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
+ unsigned long (*get_virt_boundary)(struct nvme_ctrl *ctrl, bool is_admin);
};
/*
@@ -1108,7 +1115,7 @@ struct nvme_zone_info {
};
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones, struct blk_report_zones_args *args);
int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
struct nvme_zone_info *zi);
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c916176bd9f0..0e4caeab739c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -260,8 +260,20 @@ enum nvme_iod_flags {
/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
+ /* Data payload contains p2p memory */
+ IOD_DATA_P2P = 1U << 3,
+
+ /* Metadata contains p2p memory */
+ IOD_META_P2P = 1U << 4,
+
+ /* Data payload contains MMIO memory */
+ IOD_DATA_MMIO = 1U << 5,
+
+ /* Metadata contains MMIO memory */
+ IOD_META_MMIO = 1U << 6,
+
/* Metadata using non-coalesced MPTR */
- IOD_SINGLE_META_SEGMENT = 1U << 5,
+ IOD_SINGLE_META_SEGMENT = 1U << 7,
};
struct nvme_dma_vec {
@@ -613,9 +625,22 @@ static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
- if (nvme_req(req)->flags & NVME_REQ_USERCMD)
- return SGL_FORCED;
- if (req->nr_integrity_segments > 1)
+ /*
+ * When the controller is capable of using SGLs, there are
+ * several conditions under which we force their use:
+ *
+ * 1. A request containing page gaps within the controller's
+ * mask cannot use the PRP format.
+ *
+ * 2. User commands use SGL because that lets the device
+ * validate the requested transfer lengths.
+ *
+ * 3. Multiple integrity segments must use SGL as that's the
+ * only way to describe such a command in NVMe.
+ */
+ if (req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1) ||
+ nvme_req(req)->flags & NVME_REQ_USERCMD ||
+ req->nr_integrity_segments > 1)
return SGL_FORCED;
return SGL_SUPPORTED;
}
@@ -685,20 +710,20 @@ static void nvme_free_descriptors(struct request *req)
}
}
-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int i;
for (i = 0; i < iod->nr_dma_vecs; i++)
- dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
- iod->dma_vecs[i].len, rq_dma_dir(req));
+ dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+ iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
}
static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
- struct nvme_sgl_desc *sg_list)
+ struct nvme_sgl_desc *sg_list, unsigned int attrs)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
enum dma_data_direction dir = rq_dma_dir(req);
@@ -707,22 +732,25 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
unsigned int i;
if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
- dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+ dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+ attrs);
return;
}
for (i = 0; i < len / sizeof(*sg_list); i++)
- dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
- le32_to_cpu(sg_list[i].length), dir);
+ dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir, attrs);
}
static void nvme_unmap_metadata(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
enum dma_data_direction dir = rq_dma_dir(req);
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct device *dma_dev = nvmeq->dev->dev;
struct nvme_sgl_desc *sge = iod->meta_descriptor;
+ unsigned int attrs = 0;
if (iod->flags & IOD_SINGLE_META_SEGMENT) {
dma_unmap_page(dma_dev, iod->meta_dma,
@@ -731,13 +759,20 @@ static void nvme_unmap_metadata(struct request *req)
return;
}
- if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
- iod->meta_total_len)) {
+ if (iod->flags & IOD_META_P2P)
+ map = PCI_P2PDMA_MAP_BUS_ADDR;
+ else if (iod->flags & IOD_META_MMIO) {
+ map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+ attrs |= DMA_ATTR_MMIO;
+ }
+
+ if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+ iod->meta_total_len, map)) {
if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
- nvme_free_sgls(req, sge, &sge[1]);
+ nvme_free_sgls(req, sge, &sge[1], attrs);
else
- dma_unmap_page(dma_dev, iod->meta_dma,
- iod->meta_total_len, dir);
+ dma_unmap_phys(dma_dev, iod->meta_dma,
+ iod->meta_total_len, dir, attrs);
}
if (iod->meta_descriptor)
@@ -747,9 +782,11 @@ static void nvme_unmap_metadata(struct request *req)
static void nvme_unmap_data(struct request *req)
{
+ enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct device *dma_dev = nvmeq->dev->dev;
+ unsigned int attrs = 0;
if (iod->flags & IOD_SINGLE_SEGMENT) {
static_assert(offsetof(union nvme_data_ptr, prp1) ==
@@ -759,12 +796,20 @@ static void nvme_unmap_data(struct request *req)
return;
}
- if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
+ if (iod->flags & IOD_DATA_P2P)
+ map = PCI_P2PDMA_MAP_BUS_ADDR;
+ else if (iod->flags & IOD_DATA_MMIO) {
+ map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+ attrs |= DMA_ATTR_MMIO;
+ }
+
+ if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
+ map)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
nvme_free_sgls(req, iod->descriptors[0],
- &iod->cmd.common.dptr.sgl);
+ &iod->cmd.common.dptr.sgl, attrs);
else
- nvme_free_prps(req);
+ nvme_free_prps(req, attrs);
}
if (iod->nr_descriptors)
@@ -1035,6 +1080,19 @@ static blk_status_t nvme_map_data(struct request *req)
if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
return iter.status;
+ switch (iter.p2pdma.map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ iod->flags |= IOD_DATA_P2P;
+ break;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ iod->flags |= IOD_DATA_MMIO;
+ break;
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ default:
+ return BLK_STS_RESOURCE;
+ }
+
if (use_sgl == SGL_FORCED ||
(use_sgl == SGL_SUPPORTED &&
(sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
@@ -1042,7 +1100,7 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
+static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int entries = req->nr_integrity_segments;
@@ -1057,6 +1115,19 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
&iod->meta_dma_state, &iter))
return iter.status;
+ switch (iter.p2pdma.map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ iod->flags |= IOD_META_P2P;
+ break;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ iod->flags |= IOD_META_MMIO;
+ break;
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ default:
+ return BLK_STS_RESOURCE;
+ }
+
if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
entries = 1;
@@ -1072,8 +1143,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
* descriptor provides an explicit length, so we're relying on that
* mechanism to catch any misunderstandings between the application and
* device.
+ *
+ * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
+ * leverages this routine when that happens.
*/
- if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
+ (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
iod->cmd.common.metadata = cpu_to_le64(iter.addr);
iod->meta_total_len = iter.len;
iod->meta_dma = iter.addr;
@@ -1114,6 +1189,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req);
+ if (is_pci_p2pdma_page(bv.bv_page))
+ return nvme_pci_setup_meta_iter(req);
+
iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
@@ -1128,7 +1206,7 @@ static blk_status_t nvme_map_metadata(struct request *req)
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
nvme_pci_metadata_use_sgls(req))
- return nvme_pci_setup_meta_sgls(req);
+ return nvme_pci_setup_meta_iter(req);
return nvme_pci_setup_meta_mptr(req);
}
@@ -2906,6 +2984,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
pci_set_master(pdev);
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
+ dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
result = -ENODEV;
goto disable;
}
@@ -3243,6 +3322,14 @@ static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
return dma_pci_p2pdma_supported(dev->dev);
}
+static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ if (!nvme_ctrl_sgl_supported(ctrl) || is_admin)
+ return NVME_CTRL_PAGE_SIZE - 1;
+ return 0;
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
@@ -3257,6 +3344,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
+ .get_virt_boundary = nvme_pci_get_virt_boundary,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -3522,6 +3610,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl:
nvme_put_ctrl(&dev->ctrl);
+ dev_err_probe(&pdev->dev, result, "probe failed\n");
return result;
}
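
The PCI hunks record at map time whether a payload sits in P2P or MMIO memory (IOD_DATA_P2P / IOD_DATA_MMIO and the metadata equivalents) so that unmap can replay the same map type and DMA attributes. A condensed sketch of that derivation (demo_unmap_attrs is a hypothetical name):

	/*
	 * Sketch: translate the per-IOD flags set in nvme_map_data()
	 * back into the map type and attrs that dma_unmap_phys() /
	 * blk_rq_dma_unmap() expect.
	 */
	static unsigned int demo_unmap_attrs(struct nvme_iod *iod,
					     enum pci_p2pdma_map_type *map)
	{
		unsigned int attrs = 0;

		*map = PCI_P2PDMA_MAP_NONE;
		if (iod->flags & IOD_DATA_P2P)
			*map = PCI_P2PDMA_MAP_BUS_ADDR;
		else if (iod->flags & IOD_DATA_MMIO) {
			*map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
			attrs |= DMA_ATTR_MMIO;	/* MMIO pages need this attr */
		}
		return attrs;
	}
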
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index ca6a74607b13..ad2ecc2f49a9 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -228,7 +228,8 @@ retry:
static int nvme_pr_read_keys(struct block_device *bdev,
struct pr_keys *keys_info)
{
- u32 rse_len, num_keys = keys_info->num_keys;
+ size_t rse_len;
+ u32 num_keys = keys_info->num_keys;
struct nvme_reservation_status_ext *rse;
int ret, i;
bool eds;
@@ -238,6 +239,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
* enough to get enough keys to fill the return keys buffer.
*/
rse_len = struct_size(rse, regctl_eds, num_keys);
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
rse = kzalloc(rse_len, GFP_KERNEL);
if (!rse)
return -ENOMEM;
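
The pr.c fix works because struct_size() saturates to SIZE_MAX on overflow; computing the size into a size_t and range-checking it before allocation prevents a silently truncated 32-bit length. A sketch of the pattern (demo_check_rse_len is a hypothetical name):

	/*
	 * Sketch: struct_size() returns SIZE_MAX if the flexible-array
	 * count overflows, so an untrusted num_keys can never wrap the
	 * allocation size; downstream length fields are 32-bit, hence
	 * the U32_MAX bound.
	 */
	static int demo_check_rse_len(u32 num_keys, size_t *out_len)
	{
		struct nvme_reservation_status_ext *rse;
		size_t rse_len = struct_size(rse, regctl_eds, num_keys);

		if (rse_len > U32_MAX)
			return -EINVAL;
		*out_len = rse_len;
		return 0;
	}
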
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 190a4cfa8a5e..35c0822edb2d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2202,6 +2202,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
.stop_ctrl = nvme_rdma_stop_ctrl,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
/*
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1413788ca7d5..69cb04406b47 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1081,6 +1081,9 @@ static void nvme_tcp_write_space(struct sock *sk)
queue = sk->sk_user_data;
if (likely(queue && sk_stream_is_writeable(sk))) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ /* Ensure pending TLS partial records are retried */
+ if (nvme_tcp_queue_tls(queue))
+ queue->write_space(sk);
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
read_unlock_bh(&sk->sk_callback_lock);
@@ -1831,7 +1834,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
sk_set_memalloc(queue->sock->sk);
if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
- ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
+ ret = kernel_bind(queue->sock, (struct sockaddr_unsized *)&ctrl->src_addr,
sizeof(ctrl->src_addr));
if (ret) {
dev_err(nctrl->device,
@@ -1869,7 +1872,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
dev_dbg(nctrl->device, "connecting queue %d\n",
nvme_tcp_queue_id(queue));
- ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
+ ret = kernel_connect(queue->sock, (struct sockaddr_unsized *)&ctrl->addr,
sizeof(ctrl->addr), 0);
if (ret) {
dev_err(nctrl->device,
@@ -2862,6 +2865,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.delete_ctrl = nvme_tcp_delete_ctrl,
.get_address = nvme_tcp_get_address,
.stop_ctrl = nvme_tcp_stop_ctrl,
+ .get_virt_boundary = nvmf_get_virt_boundary,
};
static bool
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index cce4c5b55aa9..deea2dbef5b8 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -148,8 +148,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct nvme_zone_descriptor *entry,
- unsigned int idx, report_zones_cb cb,
- void *data)
+ unsigned int idx,
+ struct blk_report_zones_args *args)
{
struct nvme_ns_head *head = ns->head;
struct blk_zone zone = { };
@@ -169,11 +169,11 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
else
zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
- return cb(&zone, idx, data);
+ return disk_report_zone(ns->disk, &zone, idx, args);
}
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct nvme_zone_report *report;
struct nvme_command c = { };
@@ -213,7 +213,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = nvme_zone_parse_entry(ns, &report->entries[i],
- zone_idx, cb, data);
+ zone_idx, args);
if (ret)
goto out_free;
zone_idx++;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3e378153a781..3da31bb1183e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -708,7 +708,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/*
* We don't really have a practical limit on the number of abort
- * comands. But we don't do anything useful for abort either, so
+ * commands. But we don't do anything useful for abort either, so
* no point in allowing more abort commands than the spec requires.
*/
id->acl = 3;
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index b340380f3892..2eadeb7e06f2 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -367,21 +367,22 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
- memset(buf, 0, 4);
+ *buf = req->sq->sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "HostHost", 8);
if (ret)
goto out;
+ memset(buf, 0, 4);
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
- ret = crypto_shash_update(shash, ctrl->subsysnqn,
- strlen(ctrl->subsysnqn));
+ ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+ strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, response);
@@ -428,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
}
transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
- ctrl->subsysnqn);
+ ctrl->subsys->subsysnqn);
if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key);
goto out_free_tfm;
@@ -483,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, "Controller", 10);
if (ret)
goto out;
- ret = crypto_shash_update(shash, ctrl->subsysnqn,
- strlen(ctrl->subsysnqn));
+ ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+ strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
@@ -574,7 +575,7 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
return;
}
ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
- sq->ctrl->subsysnqn,
+ sq->ctrl->subsys->subsysnqn,
sq->ctrl->hostnqn, &digest);
if (ret) {
pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
@@ -589,8 +590,10 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
goto out_free_digest;
}
#ifdef CONFIG_NVME_TARGET_TCP_TLS
- tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
- sq->ctrl->shash_id, tls_psk, psk_len, digest);
+ tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
+ sq->ctrl->subsys->subsysnqn,
+ sq->ctrl->shash_id, tls_psk, psk_len,
+ digest);
if (IS_ERR(tls_key)) {
pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5d7d483bfbe3..cc88e5a28c8a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
* - the nvmet_transports array
*
* When updating any of those lists/structures write lock should be obtained,
- * while when reading (popolating discovery log page or checking host-subsystem
+ * while when reading (populating discovery log page or checking host-subsystem
* link) read lock is obtained to allow concurrent reads.
*/
DECLARE_RWSEM(nvmet_config_sem);
@@ -1628,7 +1628,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
@@ -1903,6 +1902,8 @@ static void nvmet_subsys_free(struct kref *ref)
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
+ WARN_ON_ONCE(!list_empty(&subsys->ctrls));
+ WARN_ON_ONCE(!list_empty(&subsys->hosts));
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
nvmet_debugfs_subsys_free(subsys);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index bf01ec414c55..5946681cb0e3 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -43,6 +43,7 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
data->auth_protocol[0].dhchap.halen,
data->auth_protocol[0].dhchap.dhlen);
req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ req->sq->sc_c = data->sc_c;
if (data->sc_c != NVME_AUTH_SECP_NOSC) {
if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7d84527d5a43..0d9784004c9b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -490,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
sizeof(*discon_rqst) + sizeof(*discon_acc) +
tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
- dev_info(tgtport->dev,
- "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
tgtport->fc_target_port.port_num, assoc->a_id);
return;
}
@@ -513,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
nvmet_fc_disconnect_assoc_done);
if (ret) {
- dev_info(tgtport->dev,
- "{%d:%d} XMT Disconnect Association failed: %d\n",
+ pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
tgtport->fc_target_port.port_num, assoc->a_id, ret);
kfree(lsop);
}
@@ -1187,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_free(&tgtport->assoc_cnt, assoc->a_id);
- dev_info(tgtport->dev,
- "{%d:%d} Association freed\n",
+ pr_info("{%d:%d}: Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
kfree(assoc);
}
@@ -1224,8 +1221,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
flush_workqueue(assoc->queues[i]->work_q);
}
- dev_info(tgtport->dev,
- "{%d:%d} Association deleted\n",
+ pr_info("{%d:%d}: Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
nvmet_fc_tgtport_put(tgtport);
@@ -1716,9 +1712,9 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
}
if (ret) {
- dev_err(tgtport->dev,
- "Create Association LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Create Association LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
@@ -1730,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
- dev_info(tgtport->dev,
- "{%d:%d} Association created\n",
+ pr_info("{%d:%d}: Association created\n",
tgtport->fc_target_port.port_num, iod->assoc->a_id);
/* format a response */
@@ -1809,9 +1804,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
}
if (ret) {
- dev_err(tgtport->dev,
- "Create Connection LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Create Connection LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
@@ -1871,9 +1866,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
}
if (ret || !assoc) {
- dev_err(tgtport->dev,
- "Disconnect LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Disconnect LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
@@ -1907,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
spin_unlock_irqrestore(&tgtport->lock, flags);
if (oldls) {
- dev_info(tgtport->dev,
- "{%d:%d} Multiple Disconnect Association LS's "
+ pr_info("{%d:%d}: Multiple Disconnect Association LS's "
"received\n",
tgtport->fc_target_port.port_num, assoc->a_id);
/* overwrite good response with bogus failure */
@@ -2051,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: payload too large (%d)\n",
+ pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "",
lsreqbuf_len);
@@ -2060,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
}
if (!nvmet_fc_tgtport_get(tgtport)) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: target deleting\n",
+ pr_info("{%d}: RCV %s LS failed: target deleting\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN;
@@ -2069,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: context allocation failed\n",
+ pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5dffcc5becae..c30e9a3e014f 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -254,7 +254,6 @@ struct fcloop_nport {
struct fcloop_lsreq {
struct nvmefc_ls_req *lsreq;
struct nvmefc_ls_rsp ls_rsp;
- int lsdir; /* H2T or T2H */
int status;
struct list_head ls_list; /* fcloop_rport->ls_list */
};
@@ -1111,8 +1110,10 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
rport->nport->rport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (put_port)
+ if (put_port) {
+ WARN_ON(!list_empty(&rport->ls_list));
fcloop_nport_put(rport->nport);
+ }
}
static void
@@ -1130,8 +1131,10 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
tport->nport->tport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (put_port)
+ if (put_port) {
+ WARN_ON(!list_empty(&tport->ls_list));
fcloop_nport_put(tport->nport);
+ }
}
#define FCLOOP_HW_QUEUES 4
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f85a8441bcc6..fc8e7c9ad858 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -511,6 +511,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.submit_async_event = nvme_loop_submit_async_event,
.delete_ctrl = nvme_loop_delete_ctrl_host,
.get_address = nvmf_get_address,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51df72f5e89b..b664b584fdc8 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -159,6 +159,7 @@ struct nvmet_sq {
bool authenticated;
struct delayed_work auth_expired_work;
u16 dhchap_tid;
+ u8 sc_c;
u8 dhchap_status;
u8 dhchap_step;
u8 *dhchap_c1;
@@ -284,7 +285,6 @@ struct nvmet_ctrl {
__le32 *changed_ns_list;
u32 nr_changed_ns;
- char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
struct device *p2p_client;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 0c361b1e3566..96648ec2fadb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/
- memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+ memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* use fabric id-ctrl values */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 2e78397a7373..f858a6c9d7cb 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -320,12 +320,14 @@ static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
nvme_epf->dma_enabled = true;
dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
- dma_chan_name(chan),
- dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+ dma_chan_name(nvme_epf->dma_rx_chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+ dma_rx_chan)));
dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
- dma_chan_name(chan),
- dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+ dma_chan_name(nvme_epf->dma_tx_chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+ dma_tx_chan)));
return;
@@ -2325,6 +2327,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
return ret;
}
+ nvmet_pci_epf_init_dma(nvme_epf);
+
/* Set device ID, class, etc. */
epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
@@ -2422,8 +2426,6 @@ static int nvmet_pci_epf_bind(struct pci_epf *epf)
if (ret)
return ret;
- nvmet_pci_epf_init_dma(nvme_epf);
-
return 0;
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0485e25ab797..9c12b2361a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -367,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmds;
int ret = -EINVAL, i;
- cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+ cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
if (!cmds)
goto out;
@@ -382,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
out_free:
while (--i >= 0)
nvmet_rdma_free_cmd(ndev, cmds + i, admin);
- kfree(cmds);
+ kvfree(cmds);
out:
return ERR_PTR(ret);
}
@@ -394,7 +394,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
for (i = 0; i < nr_cmds; i++)
nvmet_rdma_free_cmd(ndev, cmds + i, admin);
- kfree(cmds);
+ kvfree(cmds);
}
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
@@ -455,7 +455,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
NUMA_NO_NODE, false, true))
goto out;
- queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+ queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
GFP_KERNEL);
if (!queue->rsps)
goto out_free_sbitmap;
@@ -473,7 +473,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
out_free:
while (--i >= 0)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
- kfree(queue->rsps);
+ kvfree(queue->rsps);
out_free_sbitmap:
sbitmap_free(&queue->rsp_tags);
out:
@@ -487,7 +487,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
for (i = 0; i < nr_rsps; i++)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
- kfree(queue->rsps);
+ kvfree(queue->rsps);
sbitmap_free(&queue->rsp_tags);
}
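
The kcalloc-to-kvcalloc conversions above (and in tcp.c below) matter because queue depth scales these command/response arrays beyond what kmalloc may satisfy under memory fragmentation; kvcalloc transparently falls back to vmalloc, and kvfree() releases either kind of allocation. A minimal sketch:

	/*
	 * Sketch: allocate a potentially large per-queue array with a
	 * vmalloc fallback; the matching free must be kvfree().
	 */
	static struct nvmet_rdma_cmd *demo_alloc_cmds(int nr_cmds)
	{
		return kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd),
				GFP_KERNEL);
	}

	static void demo_free_cmds(struct nvmet_rdma_cmd *cmds)
	{
		kvfree(cmds);
	}
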
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 470bf37e5a63..15416ff0eac4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1484,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmds;
int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
- cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
+ cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
if (!cmds)
goto out;
@@ -1500,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
out_free:
while (--i >= 0)
nvmet_tcp_free_cmd(cmds + i);
- kfree(cmds);
+ kvfree(cmds);
out:
return ret;
}
@@ -1514,7 +1514,7 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
nvmet_tcp_free_cmd(cmds + i);
nvmet_tcp_free_cmd(&queue->connect);
- kfree(cmds);
+ kvfree(cmds);
}
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
@@ -2055,7 +2055,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
if (so_priority > 0)
sock_set_priority(port->sock->sk, so_priority);
- ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
+ ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr,
sizeof(port->addr));
if (ret) {
pr_err("failed to bind port socket %d\n", ret);