Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c |  2
-rw-r--r--  drivers/nvme/target/auth.c      | 18
-rw-r--r--  drivers/nvme/target/core.c      |  5
-rw-r--r--  drivers/nvme/target/fc.c        | 48
-rw-r--r--  drivers/nvme/target/fcloop.c    |  9
-rw-r--r--  drivers/nvme/target/nvmet.h     |  1
-rw-r--r--  drivers/nvme/target/passthru.c  |  2
-rw-r--r--  drivers/nvme/target/pci-epf.c   | 14
-rw-r--r--  drivers/nvme/target/rdma.c      | 12
-rw-r--r--  drivers/nvme/target/tcp.c       |  6
10 files changed, 59 insertions, 58 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3e378153a781..3da31bb1183e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -708,7 +708,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 
 	/*
 	 * We don't really have a practical limit on the number of abort
-	 * comands. But we don't do anything useful for abort either, so
+	 * commands. But we don't do anything useful for abort either, so
 	 * no point in allowing more abort commands than the spec requires.
 	 */
 	id->acl = 3;
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 300d5e032f6d..2eadeb7e06f2 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -381,8 +381,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
-	ret = crypto_shash_update(shash, ctrl->subsysnqn,
-			strlen(ctrl->subsysnqn));
+	ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+			strlen(ctrl->subsys->subsysnqn));
 	if (ret)
 		goto out;
 	ret = crypto_shash_final(shash, response);
@@ -429,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 	}
 	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
-				ctrl->subsysnqn);
+				ctrl->subsys->subsysnqn);
 	if (IS_ERR(transformed_key)) {
 		ret = PTR_ERR(transformed_key);
 		goto out_free_tfm;
 	}
@@ -484,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 	ret = crypto_shash_update(shash, "Controller", 10);
 	if (ret)
 		goto out;
-	ret = crypto_shash_update(shash, ctrl->subsysnqn,
-			strlen(ctrl->subsysnqn));
+	ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+			strlen(ctrl->subsys->subsysnqn));
 	if (ret)
 		goto out;
 	ret = crypto_shash_update(shash, buf, 1);
@@ -575,7 +575,7 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
 		return;
 	}
 	ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
-					sq->ctrl->subsysnqn,
+					sq->ctrl->subsys->subsysnqn,
 					sq->ctrl->hostnqn, &digest);
 	if (ret) {
 		pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
@@ -590,8 +590,10 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
 		goto out_free_digest;
 	}
 #ifdef CONFIG_NVME_TARGET_TCP_TLS
-	tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
-				       sq->ctrl->shash_id, tls_psk, psk_len, digest);
+	tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
+				       sq->ctrl->subsys->subsysnqn,
+				       sq->ctrl->shash_id, tls_psk, psk_len,
+				       digest);
 	if (IS_ERR(tls_key)) {
 		pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
 			__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5d7d483bfbe3..cc88e5a28c8a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
  * - the nvmet_transports array
  *
  * When updating any of those lists/structures write lock should be obtained,
- * while when reading (popolating discovery log page or checking host-subsystem
+ * while when reading (populating discovery log page or checking host-subsystem
  * link) read lock is obtained to allow concurrent reads.
  */
 DECLARE_RWSEM(nvmet_config_sem);
@@ -1628,7 +1628,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
-	memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
 
 	kref_init(&ctrl->ref);
@@ -1903,6 +1902,8 @@ static void nvmet_subsys_free(struct kref *ref)
 	struct nvmet_subsys *subsys =
 		container_of(ref, struct nvmet_subsys, ref);
 
+	WARN_ON_ONCE(!list_empty(&subsys->ctrls));
+	WARN_ON_ONCE(!list_empty(&subsys->hosts));
 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
 	nvmet_debugfs_subsys_free(subsys);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7d84527d5a43..0d9784004c9b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -490,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
 			sizeof(*discon_rqst) + sizeof(*discon_acc) +
 			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
 	if (!lsop) {
-		dev_info(tgtport->dev,
-			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
+		pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
 			tgtport->fc_target_port.port_num, assoc->a_id);
 		return;
 	}
@@ -513,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
 	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
 				nvmet_fc_disconnect_assoc_done);
 	if (ret) {
-		dev_info(tgtport->dev,
-			"{%d:%d} XMT Disconnect Association failed: %d\n",
+		pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
 			tgtport->fc_target_port.port_num, assoc->a_id, ret);
 		kfree(lsop);
 	}
@@ -1187,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 	if (oldls)
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	ida_free(&tgtport->assoc_cnt, assoc->a_id);
-	dev_info(tgtport->dev,
-		"{%d:%d} Association freed\n",
+	pr_info("{%d:%d}: Association freed\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
 	kfree(assoc);
 }
@@ -1224,8 +1221,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 			flush_workqueue(assoc->queues[i]->work_q);
 	}
 
-	dev_info(tgtport->dev,
-		"{%d:%d} Association deleted\n",
+	pr_info("{%d:%d}: Association deleted\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
 
 	nvmet_fc_tgtport_put(tgtport);
@@ -1716,9 +1712,9 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret) {
-		dev_err(tgtport->dev,
-			"Create Association LS failed: %s\n",
-			validation_errors[ret]);
+		pr_err("{%d}: Create Association LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
+			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				FCNVME_RJT_RC_LOGIC,
@@ -1730,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	atomic_set(&queue->connected, 1);
 	queue->sqhd = 0;	/* best place to init value */
 
-	dev_info(tgtport->dev,
-		"{%d:%d} Association created\n",
+	pr_info("{%d:%d}: Association created\n",
 		tgtport->fc_target_port.port_num, iod->assoc->a_id);
 
 	/* format a response */
@@ -1809,9 +1804,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret) {
-		dev_err(tgtport->dev,
-			"Create Connection LS failed: %s\n",
-			validation_errors[ret]);
+		pr_err("{%d}: Create Connection LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
+			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
@@ -1871,9 +1866,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	if (ret || !assoc) {
-		dev_err(tgtport->dev,
-			"Disconnect LS failed: %s\n",
-			validation_errors[ret]);
+		pr_err("{%d}: Disconnect LS failed: %s\n",
+			tgtport->fc_target_port.port_num,
+			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
 				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
@@ -1907,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
 	if (oldls) {
-		dev_info(tgtport->dev,
-			"{%d:%d} Multiple Disconnect Association LS's "
+		pr_info("{%d:%d}: Multiple Disconnect Association LS's "
 			"received\n",
 			tgtport->fc_target_port.port_num, assoc->a_id);
 		/* overwrite good response with bogus failure */
@@ -2051,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	struct fcnvme_ls_rqst_w0 *w0 =
 			(struct fcnvme_ls_rqst_w0 *)lsreqbuf;
 
 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: payload too large (%d)\n",
+		pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "", lsreqbuf_len);
@@ -2060,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	}
 
 	if (!nvmet_fc_tgtport_get(tgtport)) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: target deleting\n",
+		pr_info("{%d}: RCV %s LS failed: target deleting\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "");
 		return -ESHUTDOWN;
@@ -2069,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	iod = nvmet_fc_alloc_ls_iod(tgtport);
 	if (!iod) {
-		dev_info(tgtport->dev,
-			"RCV %s LS failed: context allocation failed\n",
+		pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
+			tgtport->fc_target_port.port_num,
 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
 				nvmefc_ls_names[w0->ls_cmd] : "");
 		nvmet_fc_tgtport_put(tgtport);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5dffcc5becae..c30e9a3e014f 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -254,7 +254,6 @@ struct fcloop_nport {
 struct fcloop_lsreq {
 	struct nvmefc_ls_req		*lsreq;
 	struct nvmefc_ls_rsp		ls_rsp;
-	int				lsdir;	/* H2T or T2H */
 	int				status;
 	struct list_head		ls_list; /* fcloop_rport->ls_list */
 };
@@ -1111,8 +1110,10 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 	rport->nport->rport = NULL;
 	spin_unlock_irqrestore(&fcloop_lock, flags);
 
-	if (put_port)
+	if (put_port) {
+		WARN_ON(!list_empty(&rport->ls_list));
 		fcloop_nport_put(rport->nport);
+	}
 }
 
 static void
@@ -1130,8 +1131,10 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 	tport->nport->tport = NULL;
 	spin_unlock_irqrestore(&fcloop_lock, flags);
 
-	if (put_port)
+	if (put_port) {
+		WARN_ON(!list_empty(&tport->ls_list));
 		fcloop_nport_put(tport->nport);
+	}
 }
 
 #define FCLOOP_HW_QUEUES	4
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f3b09f4099f0..b664b584fdc8 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -285,7 +285,6 @@ struct nvmet_ctrl {
 	__le32			*changed_ns_list;
 	u32			nr_changed_ns;
 
-	char			subsysnqn[NVMF_NQN_FIELD_LEN];
 	char			hostnqn[NVMF_NQN_FIELD_LEN];
 
 	struct device		*p2p_client;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 0c361b1e3566..96648ec2fadb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	 * code path with duplicate ctrl subsysnqn. In order to prevent that we
 	 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
 	 */
-	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+	memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
 	/* use fabric id-ctrl values */
 	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 2e78397a7373..f858a6c9d7cb 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -320,12 +320,14 @@ static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
 	nvme_epf->dma_enabled = true;
 
 	dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
-		dma_chan_name(chan),
-		dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+		dma_chan_name(nvme_epf->dma_rx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+							      dma_rx_chan)));
 
 	dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
-		dma_chan_name(chan),
-		dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+		dma_chan_name(nvme_epf->dma_tx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
+							      dma_tx_chan)));
 
 	return;
@@ -2325,6 +2327,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
 		return ret;
 	}
 
+	nvmet_pci_epf_init_dma(nvme_epf);
+
 	/* Set device ID, class, etc. */
 	epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
 	epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
@@ -2422,8 +2426,6 @@ static int nvmet_pci_epf_bind(struct pci_epf *epf)
 	if (ret)
 		return ret;
 
-	nvmet_pci_epf_init_dma(nvme_epf);
-
 	return 0;
 }
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0485e25ab797..9c12b2361a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -367,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
 	struct nvmet_rdma_cmd *cmds;
 	int ret = -EINVAL, i;
 
-	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+	cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
 	if (!cmds)
 		goto out;
@@ -382,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
 out_free:
 	while (--i >= 0)
 		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
-	kfree(cmds);
+	kvfree(cmds);
 out:
 	return ERR_PTR(ret);
 }
@@ -394,7 +394,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
 
 	for (i = 0; i < nr_cmds; i++)
 		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
-	kfree(cmds);
+	kvfree(cmds);
 }
 
 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
@@ -455,7 +455,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 			NUMA_NO_NODE, false, true))
 		goto out;
 
-	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+	queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
 			GFP_KERNEL);
 	if (!queue->rsps)
 		goto out_free_sbitmap;
@@ -473,7 +473,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 out_free:
 	while (--i >= 0)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
-	kfree(queue->rsps);
+	kvfree(queue->rsps);
 out_free_sbitmap:
 	sbitmap_free(&queue->rsp_tags);
 out:
@@ -487,7 +487,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 
 	for (i = 0; i < nr_rsps; i++)
 		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
-	kfree(queue->rsps);
+	kvfree(queue->rsps);
 	sbitmap_free(&queue->rsp_tags);
 }
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d543da09ef8e..15416ff0eac4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1484,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
 	struct nvmet_tcp_cmd *cmds;
 	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
 
-	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
+	cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
 	if (!cmds)
 		goto out;
@@ -1500,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
 out_free:
 	while (--i >= 0)
 		nvmet_tcp_free_cmd(cmds + i);
-	kfree(cmds);
+	kvfree(cmds);
 out:
 	return ret;
 }
@@ -1514,7 +1514,7 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
 		nvmet_tcp_free_cmd(cmds + i);
 
 	nvmet_tcp_free_cmd(&queue->connect);
-	kfree(cmds);
+	kvfree(cmds);
 }
 
 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
