Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/linit.c  2
-rw-r--r--  drivers/scsi/advansys.c  3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c  3
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  3
-rw-r--r--  drivers/scsi/bfa/bfad.c  1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c  2
-rw-r--r--  drivers/scsi/csiostor/csio_init.c  1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c  2
-rw-r--r--  drivers/scsi/fnic/fnic_res.c  1
-rw-r--r--  drivers/scsi/hosts.c  19
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c  3
-rw-r--r--  drivers/scsi/ipr.c  1
-rw-r--r--  drivers/scsi/isci/task.h  10
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  36
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  249
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h  25
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  20
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  21
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  79
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h  17
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c  2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c  15
-rw-r--r--  drivers/scsi/qedi/qedi_main.c  2
-rw-r--r--  drivers/scsi/qla1280.c  35
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  32
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  44
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  1777
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  112
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c  4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c  5
-rw-r--r--  drivers/scsi/scsi.c  12
-rw-r--r--  drivers/scsi/scsi_debug.c  132
-rw-r--r--  drivers/scsi/scsi_error.c  3
-rw-r--r--  drivers/scsi/scsi_lib.c  104
-rw-r--r--  drivers/scsi/scsi_logging.c  21
-rw-r--r--  drivers/scsi/scsi_pm.c  1
-rw-r--r--  drivers/scsi/scsi_priv.h  1
-rw-r--r--  drivers/scsi/scsi_scan.c  74
-rw-r--r--  drivers/scsi/scsi_sysfs.c  79
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  5
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c  2
-rw-r--r--  drivers/scsi/sd.c  34
-rw-r--r--  drivers/scsi/sd.h  2
-rw-r--r--  drivers/scsi/sd_zbc.c  20
-rw-r--r--  drivers/scsi/sim710.c  2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  49
-rw-r--r--  drivers/scsi/st.c  89
-rw-r--r--  drivers/scsi/stex.c  1
61 files changed, 2514 insertions, 692 deletions
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index ea66196ef7c7..82c6e7c7cdaf 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -242,7 +242,7 @@ static int aac_queuecommand(struct Scsi_Host *shost,
{
aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
- return aac_scsi_cmd(cmd) ? FAILED : 0;
+ return aac_scsi_cmd(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
}
/**
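The FAILED value above belongs to the error-handler return codes, not to
->queuecommand(): a queuecommand handler may only return 0 or one of the
SCSI_MLQUEUE_* busy codes. A minimal sketch of the contract, with invented
example_* helpers (not part of this patch):

	static int example_queuecommand(struct Scsi_Host *shost,
					struct scsi_cmnd *cmd)
	{
		/* Busy: the midlayer re-queues and retries the command later. */
		if (example_adapter_busy(shost))
			return SCSI_MLQUEUE_HOST_BUSY;

		/* Accepted: completion is signalled later via scsi_done(). */
		example_submit(cmd);
		return 0;
	}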
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 063e1b5818d3..06223b5ee6da 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2401,8 +2401,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %d, host_no %d,\n",
- scsi_host_busy(s), s->host_no);
+ printk(" host_no %d,\n", s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index adf3d9145606..95f3620059f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -882,6 +882,9 @@ static void asd_pci_remove(struct pci_dev *dev)
asd_disable_ints(asd_ha);
+ /* Ensure all scheduled tasklets complete before freeing resources */
+ tasklet_kill(&asd_ha->seq.dl_tasklet);
+
asd_remove_dev_attrs(asd_ha);
/* XXX more here as needed */
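A hedged sketch of the teardown ordering the added tasklet_kill() enforces
(the example_* names are invented for illustration):

	static void example_pci_remove(struct pci_dev *pdev)
	{
		struct example_ha *ha = pci_get_drvdata(pdev);

		example_disable_ints(ha);       /* stop scheduling new tasklets */
		tasklet_kill(&ha->dl_tasklet);  /* wait out any running tasklet */
		example_free_resources(ha);     /* only now safe to free */
	}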
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index dc88bc46dcc0..a0e794ffc980 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5633,7 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+ phba->wq = alloc_workqueue("beiscsi_%02x_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
phba->shost->host_no);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index ff9adfc0b332..bdfd06516671 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1528,7 +1528,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
goto out_disable_device;
}
- pci_save_state(pdev);
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
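This is one of several pci_save_state() deletions in this diff (see also
csiostor, ipr, lpfc and qla2xxx below); the usual rationale is that
pci_restore_state() no longer clears the device's saved_state, so re-saving
inside ->slot_reset() is redundant. A rough sketch of the resulting handler
shape, with invented names:

	static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;

		pci_restore_state(pdev);   /* no follow-up pci_save_state() */
		pci_set_master(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}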
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 58da993251e9..0f68739d380a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2695,7 +2695,7 @@ static int __init bnx2fc_mod_init(void)
if (rc)
goto detach_ft;
- bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ bnx2fc_wq = alloc_workqueue("bnx2fc", WQ_PERCPU, 0);
if (!bnx2fc_wq) {
rc = -ENOMEM;
goto release_bt;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 79c8dafdd49e..db0c2174430a 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1093,7 +1093,6 @@ csio_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
/* Bring HW s/m to ready state.
* but don't resume IOs.
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1bf5948d1188..6fd89ae33059 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1300,7 +1300,7 @@ static int __init alua_init(void)
{
int r;
- kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!kaluad_wq)
return -ENOMEM;
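The WQ_PERCPU additions repeated throughout this diff make the current
per-CPU queue placement explicit (apparently ahead of the workqueue default
changing); behavior is unchanged. Minimal sketch with an invented name:

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* WQ_MEM_RECLAIM guarantees forward progress under memory
		 * pressure, which I/O-path workqueues generally need. */
		example_wq = alloc_workqueue("example_wq",
					     WQ_MEM_RECLAIM | WQ_PERCPU, 0);
		return example_wq ? 0 : -ENOMEM;
	}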
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 4912087de10d..c8c5dfb3ba9a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2438,7 +2438,7 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
- fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ fcoe_wq = alloc_workqueue("fcoe", WQ_PERCPU, 0);
if (!fcoe_wq)
return -ENOMEM;
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 763475587b7f..9801e5fbb0dd 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -134,7 +134,6 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->luns_per_tgt));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
- c->intr_timer_type = c->intr_timer_type;
/* for older firmware, GET_CONFIG will not return anything */
if (c->wq_copy_count == 0)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 17173239301e..1b3fbd328277 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -231,6 +231,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ if (shost->nr_reserved_cmds && !sht->queue_reserved_command) {
+ shost_printk(KERN_ERR, shost,
+ "nr_reserved_cmds set but no method to queue\n");
+ goto fail;
+ }
+
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
@@ -307,6 +313,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto out_del_dev;
+ if (shost->nr_reserved_cmds) {
+ shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+ if (!shost->pseudo_sdev) {
+ error = -ENOMEM;
+ goto out_del_dev;
+ }
+ }
+
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
@@ -436,6 +450,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
shost->hostt = sht;
shost->this_id = sht->this_id;
shost->can_queue = sht->can_queue;
+ shost->nr_reserved_cmds = sht->nr_reserved_cmds;
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
@@ -604,8 +619,8 @@ static bool scsi_host_check_in_flight(struct request *rq, void *data)
}
/**
- * scsi_host_busy - Return the host busy counter
- * @shost: Pointer to Scsi_Host to inc.
+ * scsi_host_busy - Return the count of in-flight commands
+ * @shost: Pointer to Scsi_Host
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
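A hypothetical host template wiring up the reserved-command support added
above; nr_reserved_cmds and queue_reserved_command come from this series,
everything else is invented:

	static const struct scsi_host_template example_sht = {
		.name			= "example",
		.queuecommand		= example_queuecommand,
		.queue_reserved_command	= example_queue_reserved_command,
		.can_queue		= 32,
		.nr_reserved_cmds	= 2,	/* rejected at scsi_add_host()
						 * time without a queue method */
	};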
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 5a3787f27369..f259746bc804 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3533,7 +3533,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
- vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+ vscsi->work_q = alloc_workqueue("ibmvscsis%s",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
dev_name(&vdev->dev));
if (!vscsi->work_q) {
rc = -ENOMEM;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 44214884deaf..95123689e9d1 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7859,7 +7859,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
- ioa_cfg->pdev->state_saved = true;
pci_restore_state(ioa_cfg->pdev);
if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index f96633fa6939..d05d09c1263d 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -85,15 +85,17 @@ struct isci_tmf {
struct completion *complete;
enum sas_protocol proto;
+ unsigned char lun[8];
+ u16 io_tag;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* Must be last --ends in a flexible-array member. */
union {
struct ssp_response_iu resp_iu;
struct dev_to_host_fis d2h_fis;
u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
} resp;
- unsigned char lun[8];
- u16 io_tag;
- enum isci_tmf_function_codes tmf_code;
- int status;
};
static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
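The reorder above is needed because the resp union contains
ssp_response_iu, which ends in a flexible array; such members are only
well-formed at the end of the enclosing struct (recent compilers warn via
-Wflex-array-member-not-at-end). Illustrative sketch with invented names:

	struct example_rsp {
		u32 len;
		u8 data[];			/* flexible array member */
	};

	struct example_tmf {
		int status;			/* fixed-size fields first */
		struct example_rsp resp;	/* flex-ended member: keep last */
	};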
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 224edacf2d8e..689793d03c20 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -311,7 +311,6 @@ struct lpfc_defer_flogi_acc {
u16 rx_id;
u16 ox_id;
struct lpfc_nodelist *ndlp;
-
};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@@ -634,6 +633,7 @@ struct lpfc_vport {
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
+#define FC_CT_RSPNI_PNI 0x40 /* RSPNI_PNI accepted by switch */
struct list_head fc_nodes;
spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
@@ -1078,6 +1078,8 @@ struct lpfc_hba {
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+ unsigned long pni; /* 64-bit Platform Name Identifier */
+
uint8_t wwnn[8];
uint8_t wwpn[8];
uint32_t RandomData[7];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f93f8dca65bd..d3caac394291 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1743,6 +1743,28 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
static void
+lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *ctrsp;
+ u32 ulp_status;
+
+ vport = cmdiocb->vport;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status == IOSTAT_SUCCESS) {
+ outp = cmdiocb->rsp_dmabuf;
+ ctrsp = (struct lpfc_sli_ct_request *)outp->virt;
+ if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) ==
+ SLI_CT_RESPONSE_FS_ACC)
+ vport->ct_flags |= FC_CT_RSPNI_PNI;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+}
+
+static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@@ -1956,6 +1978,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPNI_PNI)
+ bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_DA_ID)
bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -2077,6 +2101,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_RSPNI_PNI:
+ vport->ct_flags &= ~FC_CT_RSPNI_PNI;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RSPNI_PNI);
+ CtReq->un.rspni.pni = cpu_to_be64(phba->pni);
+ scnprintf(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s",
+ phba->os_host_name);
+ CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname));
+ cmpl = lpfc_cmpl_ct_cmd_rspni_pni;
+ break;
case SLI_CTNS_DA_ID:
/* Implement DA_ID Nameserver request */
CtReq->CommandResponse.bits.CmdRsp =
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3d47dc7458d1..51cb8571c049 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -208,6 +208,7 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
+ NLP_FLOGI_DFR_ACC = 28, /* FLOGI LS_ACC was Deferred */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b71db7d7d747..02b6d31b9ad9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -650,8 +650,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
@@ -934,10 +932,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
- * trigger the release of the node
+ * trigger the release of the node. Make sure the ndlp
+ * is marked NLP_DROPPED.
*/
- if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
goto out;
}
@@ -995,9 +998,10 @@ stop_rr_fcf_flogi:
IOERR_LOOP_OPEN_FAILURE)))
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"2858 FLOGI Status:x%x/x%x TMO"
- ":x%x Data x%lx x%x\n",
+ ":x%x Data x%lx x%x x%lx x%x\n",
ulp_status, ulp_word4, tmo,
- phba->hba_flag, phba->fcf.fcf_flag);
+ phba->hba_flag, phba->fcf.fcf_flag,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1015,14 +1019,17 @@ stop_rr_fcf_flogi:
* reference to trigger node release.
*/
if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
- !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI Status:x%x/x%x "
- "xri x%x TMO:x%x refcnt %d\n",
+ "xri x%x iotag x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
- tmo, kref_read(&ndlp->kref));
+ cmdiocb->iotag, tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
@@ -1279,6 +1286,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t tmo, did;
int rc;
+ /* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked
+ * NLP_DROPPED. This happens when the FLOGI completed with the XB bit
+ * set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is
+ * issued. The time window for the XRI_ABORTED CQE can be as much as
+ * 2*2*RA_TOV allowing for ndlp reuse of this type when the link is
+ * cycling quickly. When true, restore the initial reference and remove
+ * the NLP_DROPPED flag as lpfc is retrying.
+ */
+ if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
+ if (!lpfc_nlp_get(ndlp))
+ return 1;
+ }
+
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1334,6 +1354,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
+
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ sp->aux.npiv_cnt = cpu_to_be16(phba->max_vpi - 1);
+ }
} else {
/* Historical, setting sequential-delivery bit for SLI3 */
sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
@@ -1413,11 +1441,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ox_id;
}
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
+ /* The LS_ACC completion needs to drop the initial reference.
+ * This is a special case for Pt2Pt because both FLOGIs need
+ * to complete and lpfc defers the LS_ACC when the remote
+ * FLOGI arrives before the driver's FLOGI.
+ */
+ set_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
@@ -1433,6 +1462,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ndlp = NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
+
vport->fc_myDID = did;
}
@@ -2248,7 +2285,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* Check if the destination port supports VMID */
ndlp->vmid_support = 0;
@@ -2367,7 +2405,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mode = KERN_INFO;
/* Warn PRLI status */
- lpfc_printf_vlog(vport, mode, LOG_ELS,
+ lpfc_vlog_msg(vport, mode, LOG_ELS,
"2754 PRLI DID:%06X Status:x%x/x%x, "
"data: x%x x%x x%lx\n",
ndlp->nlp_DID, ulp_status,
@@ -3024,6 +3062,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status,
ulp_word4);
+ /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
}
@@ -3262,7 +3301,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
return -ENOMEM;
}
rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
- (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ (u8 *)&ns_ndlp->fc_sparam, mbox, fc_ndlp->nlp_rpi);
if (rc) {
rc = -EACCES;
goto out;
@@ -3306,7 +3345,8 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
*
* This routine is a generic completion callback function for Discovery ELS cmd.
* Currently used by the ELS command issuing routines for the ELS State Change
- * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC),
+ * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf().
* These commands will be retried once only for ELS timeout errors.
**/
static void
@@ -3379,11 +3419,21 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
+
if (ulp_status) {
/* ELS discovery cmd completes with error */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
ulp_status, ulp_word4);
+
+ /* In the case where the ELS cmd completes with an error and
+ * the node does not have RPI registered, the node is
+ * outstanding and should put its initial reference.
+ */
+ if ((cmd == ELS_CMD_SCR || cmd == ELS_CMD_RDF) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+ !test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
goto out;
}
@@ -3452,6 +3502,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
+ bool node_created = false;
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
@@ -3461,21 +3512,21 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb)
- return 1;
+ goto out_node_created;
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
if (rc) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0937 %s: Failed to reg fc node, rc %d\n",
__func__, rc);
- return 1;
+ goto out_free_iocb;
}
}
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
@@ -3494,23 +3545,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
- if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (!elsiocb->ndlp)
+ goto out_free_iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue SCR: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto out_iocb_error;
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return 1;
}
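The out_* labels above (mirrored in lpfc_issue_els_rdf() below) follow the
standard kernel unwind idiom: undo acquisitions in reverse order, one label
per step. A compact generic sketch with invented names:

	if (!take_ref())		/* e.g. lpfc_nlp_get() */
		goto out_free;
	if (submit() == IOCB_ERROR)
		goto out_put;
	return 0;

out_put:
	drop_ref();			/* undo in reverse order */
out_free:
	free_iocb();
	return 1;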
/**
@@ -3597,8 +3652,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue RSCN: did:x%x",
- ndlp->nlp_DID, 0, 0);
+ "Issue RSCN: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
@@ -3705,10 +3760,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
lpfc_nlp_put(ndlp);
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of the node.
- */
- /* Don't release reference count as RDF is likely outstanding */
+
return 0;
}
@@ -3726,7 +3778,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
*
* Return code
* 0 - Successfully issued rdf command
- * 1 - Failed to issue rdf command
+ * < 0 - Failed to issue rdf command
+ * -EACCES - RDF not required for NPIV_PORT
+ * -ENODEV - No fabric controller device available
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ *
**/
int
lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
@@ -3737,25 +3794,30 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
struct lpfc_nodelist *ndlp;
uint16_t cmdsize;
int rc;
+ bool node_created = false;
+ int err;
cmdsize = sizeof(*prdf);
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return -ENODEV;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
- /* RDF ELS is not required on an NPIV VN_Port. */
- if (vport->port_type == LPFC_NPIV_PORT)
- return -EACCES;
-
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
- if (!elsiocb)
- return -ENOMEM;
+ if (!elsiocb) {
+ err = -ENOMEM;
+ goto out_node_created;
+ }
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
@@ -3781,8 +3843,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
+ err = -EIO;
+ goto out_free_iocb;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -3791,11 +3853,19 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return -EIO;
+ err = -EIO;
+ goto out_iocb_error;
}
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return err;
}
/**
@@ -3816,19 +3886,23 @@ static int
lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc;
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
/* Send LS_ACC */
- if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "1623 Failed to RDF_ACC from x%x for x%x\n",
- ndlp->nlp_DID, vport->fc_myDID);
+ "1623 Failed to RDF_ACC from x%x for x%x Data: %d\n",
+ ndlp->nlp_DID, vport->fc_myDID, rc);
return -EIO;
}
+ rc = lpfc_issue_els_rdf(vport, 0);
/* Issue new RDF for reregistering */
- if (lpfc_issue_els_rdf(vport, 0)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "2623 Failed to re register RDF for x%x\n",
- vport->fc_myDID);
+ "2623 Failed to re register RDF for x%x Data: %d\n",
+ vport->fc_myDID, rc);
return -EIO;
}
@@ -4299,7 +4373,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
+ * lpfc_els_free_iocb routine to trigger the release of
* the node.
*/
lpfc_els_free_iocb(phba, elsiocb);
@@ -5127,7 +5201,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O iocb is complete. Clear the node and first dmbuf */
+ /* The I/O iocb is complete. Clear the node and first dmabuf */
elsiocb->ndlp = NULL;
/* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
@@ -5160,14 +5234,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->cmd_dmabuf = NULL;
}
}
if (elsiocb->bpl_dmabuf) {
buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -5305,11 +5377,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
u32 ulp_status, ulp_word4, tmo, did, iotag;
+ u32 cmd;
if (!vport) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"3177 null vport in ELS rsp\n");
- goto out;
+ goto release;
}
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -5419,7 +5492,7 @@ out:
* these conditions because it doesn't need the login.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport && vport->port_type == LPFC_NPIV_PORT &&
+ vport->port_type == LPFC_NPIV_PORT &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
@@ -5435,6 +5508,27 @@ out:
}
}
+ /* The driver's unsolicited deferred FLOGI ACC in Pt2Pt needs to
+ * release the initial reference because the put after the free_iocb
+ * call removes only the reference from the defer logic. This FLOGI
+ * is never registered with the SCSI transport.
+ */
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_and_clear_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "3357 Pt2Pt Defer FLOGI ACC ndlp x%px, "
+ "nflags x%lx, fc_flag x%lx\n",
+ ndlp, ndlp->nlp_flag,
+ vport->fc_flag);
+ cmd = *((u32 *)cmdiocb->cmd_dmabuf->virt);
+ if (cmd == ELS_CMD_ACC) {
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+release:
/* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -5569,7 +5663,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cls1.classValid = 0;
sp->cls2.classValid = 0;
sp->cls3.classValid = 0;
- sp->cls4.classValid = 0;
/* Copy our worldwide names */
memcpy(&sp->portName, &vport->fc_sparam.portName,
@@ -5583,7 +5676,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0,
sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* If our firmware supports this feature, convey that
* info to the target using the vendor specific field.
@@ -8402,13 +8496,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
&wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
-
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3344 Deferring FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
-
phba->defer_flogi_acc.flag = true;
/* This nlp_get is paired with nlp_puts that reset the
@@ -8417,6 +8504,14 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* processed or cancelled.
*/
phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3344 Deferring FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px, hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
return 0;
}
@@ -8734,7 +8829,7 @@ reject_out:
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Timout Value (RTV) IOCB received as an
+ * This routine processes Read Timeout Value (RTV) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -10357,11 +10452,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) {
- if (newnode)
- lpfc_nlp_put(ndlp);
+ if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
goto dropit;
- }
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp)
@@ -10843,7 +10935,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting cmd_dmabuf to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf/bpl_dmabuf to NULL.
*/
if (elsiocb->cmd_dmabuf) {
lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
@@ -11423,6 +11515,13 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ }
+
pcmd += sizeof(uint32_t); /* CSP Word 2 */
pcmd += sizeof(uint32_t); /* CSP Word 3 */
pcmd += sizeof(uint32_t); /* CSP Word 4 */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 43d246c5c049..bb803f32bc1b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -424,6 +424,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
+ clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -566,7 +567,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- lpfc_nlp_put(ndlp);
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@@ -4371,6 +4373,8 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ if (phba->pni)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3bc0efa7453e..b2e353590ebb 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -168,6 +168,11 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rspn;
+ struct rspni { /* For RSPNI_PNI requests */
+ __be64 pni;
+ u8 len;
+ u8 symbname[255];
+ } rspni;
struct gff {
uint32_t PortId;
} gff;
@@ -213,6 +218,8 @@ struct lpfc_sli_ct_request {
sizeof(struct da_id))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn))
+#define RSPNI_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspni))
/*
* FsType Definitions
@@ -309,6 +316,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_RSPNI_PNI 0x0240
#define SLI_CTNS_DA_ID 0x0300
/*
@@ -512,6 +520,21 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
+enum aux_parm_flags {
+ AUX_PARM_PNI_VALID = 0x20, /* FC Word 0, bit 29 */
+ AUX_PARM_DATA_VALID = 0x40, /* FC Word 0, bit 30 */
+};
+
+struct aux_parm {
+ u8 flags; /* FC Word 0, bit 31:24 */
+ u8 ext_feat[3]; /* FC Word 0, bit 23:0 */
+
+ __be64 pni; /* FC Word 1 and 2, platform name identifier */
+
+ __be16 rsvd; /* FC Word 3, bit 31:16 */
+ __be16 npiv_cnt; /* FC Word 3, bit 15:0 */
+} __packed;
+
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -519,7 +542,7 @@ struct serv_parm { /* Structure is in Big Endian format */
struct class_parms cls1;
struct class_parms cls2;
struct class_parms cls3;
- struct class_parms cls4;
+ struct aux_parm aux;
union {
uint8_t vendorVersion[16];
struct {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f206267d9ecd..b1460b16dd91 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3057,12 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_DID == Fabric_Cntl_DID &&
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- lpfc_nlp_put(ndlp);
- continue;
- }
-
/* Fabric Ports not in UNMAPPED state are cleaned up in the
* DEVICE_RM event.
*/
@@ -7950,7 +7944,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate all driver workqueues here */
/* The lpfc_wq workqueue for deferred irq use */
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!phba->wq)
return -ENOMEM;
@@ -9082,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "6077 Setup FDMI mask: hba x%x port x%x\n",
- vport->fdmi_hba_mask, vport->fdmi_port_mask);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
}
/**
@@ -14434,12 +14428,6 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
pci_restore_state(pdev);
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
if (pdev->is_busmaster)
pci_set_master(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1e5ef93e67e3..8240d59f4120 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -432,8 +432,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
/* if already logged in, do implicit logout */
@@ -452,18 +450,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
- /* Clear ndlp info, since follow up PRLI may have
- * updated ndlp information
- */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
- clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
-
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
- ndlp, NULL);
- return 1;
+ break;
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
@@ -485,7 +472,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
break;
}
-
+ /* Clear ndlp info, since follow up processes may have
+ * updated ndlp information
+ */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
@@ -1426,8 +1415,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7ea7c4245c69..73d77cfab5f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
+#include <linux/dmi.h>
+#include <linux/of.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -8447,6 +8449,70 @@ lpfc_set_host_tm(struct lpfc_hba *phba)
}
/**
+ * lpfc_get_platform_uuid - Attempts to extract a platform uuid
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine attempts to read the System UUID (offset 08h of the
+ * SMBIOS DMI System Information structure). If that fails, no
+ * platform UUID is advertised.
+ **/
+static void
+lpfc_get_platform_uuid(struct lpfc_hba *phba)
+{
+ int rc;
+ const char *uuid;
+ char pni[17] = {0}; /* 16 characters + '\0' */
+ bool is_ff = true, is_00 = true;
+ u8 i;
+
+ /* First attempt SMBIOS DMI */
+ uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
+ if (uuid) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2088 SMBIOS UUID %s\n",
+ uuid);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2099 Could not extract UUID\n");
+ }
+
+ if (uuid && uuid_is_valid(uuid)) {
+ /* Generate PNI from UUID format.
+ *
+ * 1.) Extract lower 64 bits from UUID format.
+ * 2.) Set 3h for NAA Locally Assigned Name Identifier format.
+ *
+ * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy
+ *
+ * extract the yyyy-yyyyyyyyyyyy portion
+ * final PNI 3yyyyyyyyyyyyyyy
+ */
+ scnprintf(pni, sizeof(pni), "3%c%c%c%s",
+ uuid[20], uuid[21], uuid[22], &uuid[24]);
+
+ /* Sanitize the converted PNI */
+ for (i = 1; i < 16 && (is_ff || is_00); i++) {
+ if (pni[i] != '0')
+ is_00 = false;
+ if (pni[i] != 'f' && pni[i] != 'F')
+ is_ff = false;
+ }
+
+ /* Convert from char* to unsigned long */
+ rc = kstrtoul(pni, 16, &phba->pni);
+ if (!rc && !is_ff && !is_00) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2100 PNI 0x%016lx\n", phba->pni);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2101 PNI %s generation status %d\n",
+ pni, rc);
+ phba->pni = 0;
+ }
+ }
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -8529,6 +8595,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
}
+ /* Obtain platform UUID, only for SLI4 FC adapters */
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
+ lpfc_get_platform_uuid(phba);
+
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
LPFC_DCBX_CEE_MODE)
set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
@@ -19858,13 +19928,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
* @iocbq: data to load as mbox ctx_u information
*
- * This routine is invoked to remove the memory region that
- * provided rpi via a bitmask.
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
@@ -19894,7 +19966,6 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
return -EIO;
}
- /* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
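A worked example of the UUID-to-PNI conversion performed by the new
lpfc_get_platform_uuid() above (illustrative UUID only):

	UUID:          12345678-9abc-def0-1122-334455667788
	uuid[20..22]:  "122"      &uuid[24]: "334455667788"
	PNI string:    "3" + "122" + "334455667788" = "3122334455667788"
	phba->pni:     0x3122334455667788 after kstrtoul(pni, 16, &phba->pni)

The leading 3h nibble marks the NAA Locally Assigned Name Identifier
format, as the in-function comment notes.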
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 31c3c5abdca6..f3dada5bf7c1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.11"
+#define LPFC_DRIVER_VERSION "14.4.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index b677d80e5874..ddeea0ee2834 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1150,9 +1150,13 @@ typedef struct LOG_BLOCK_SPAN_INFO {
} LD_SPAN_INFO, *PLD_SPAN_INFO;
struct MR_FW_RAID_MAP_ALL {
- struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_FW_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ );
} __attribute__ ((packed));
+static_assert(offsetof(struct MR_FW_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_FW_RAID_MAP_ALL, ldSpanMap));
struct MR_DRV_RAID_MAP {
/* total size of this structure, including this field.
@@ -1194,10 +1198,13 @@ struct MR_DRV_RAID_MAP {
* And it is mainly for code re-use purpose.
*/
struct MR_DRV_RAID_MAP_ALL {
-
- struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_DRV_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ );
} __packed;
+static_assert(offsetof(struct MR_DRV_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_DRV_RAID_MAP_ALL, ldSpanMap));
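TRAILING_OVERLAP() here overlays the inner map's trailing span array with
an explicitly bounded one; roughly (simplified, and possibly inexact with
respect to the real <linux/stddef.h> helper), the second struct above
expands to something like:

	union {
		struct MR_DRV_RAID_MAP raidMap;	/* has its own ldSpanMap tail */
		struct {
			u8 __pad[offsetof(struct MR_DRV_RAID_MAP, ldSpanMap)];
			struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
		};
	};

which is exactly the layout the static_assert on matching offsets verifies.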
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 8ff4b89ff81e..9acca83d6958 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1534,7 +1534,7 @@ static int __init pm8001_init(void)
if (pm8001_use_tasklet && !pm8001_use_msix)
pm8001_use_tasklet = false;
- pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
+ pm8001_wq = alloc_workqueue("pm80xx", WQ_PERCPU, 0);
if (!pm8001_wq)
goto err;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6b1ebab36fa3..7792e00800ae 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3374,7 +3374,8 @@ retry_probe:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link",
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
@@ -3585,7 +3586,8 @@ retry_probe:
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
@@ -3628,7 +3630,8 @@ retry_probe:
}
qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
- WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3641,7 +3644,8 @@ retry_probe:
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
qedf->dpc_wq =
- alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
+ alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4177,7 +4181,8 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b168bb2178e9..56685ee22fdf 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2768,7 +2768,7 @@ retry_probe:
}
qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedi->shost->host_no);
if (!qedi->offload_thread) {
QEDI_ERR(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index ef841f643171..26c312a48a19 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2799,7 +2799,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
- dprintk(2, " bus %i, target %i, lun %i\n",
+ dprintk(2, " bus %i, target %i, lun %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
@@ -2871,7 +2871,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
- "command packet data - b %i, t %i, l %i \n",
+ "command packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
@@ -2929,14 +2929,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg -= cnt;
dprintk(5, "qla1280_64bit_start_scsi: "
"continuation packet data - b %i, t "
- "%i, l %i \n", SCSI_BUS_32(cmd),
+ "%i, l %llu\n", SCSI_BUS_32(cmd),
SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
}
} else { /* No data transfer */
dprintk(5, "qla1280_64bit_start_scsi: No data, command "
- "packet data - b %i, t %i, l %i \n",
+ "packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
}
@@ -3655,7 +3655,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ "l %llu\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
@@ -3955,7 +3955,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
- printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ printk(" chan=%d, target = 0x%02x, lun = 0x%02llx, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
CMD_CDBLEN(cmd));
printk(" CDB = ");
@@ -3976,29 +3976,6 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
-
-/**************************************************************************
- * ql1280_dump_device
- *
- **************************************************************************/
-static void
-ql1280_dump_device(struct scsi_qla_host *ha)
-{
-
- struct scsi_cmnd *cp;
- struct srb *sp;
- int i;
-
- printk(KERN_DEBUG "Outstanding Commands on controller:\n");
-
- for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
- if ((sp = ha->outstanding_cmds[i]) == NULL)
- continue;
- if ((cp = sp->cmd) == NULL)
- continue;
- qla1280_print_scsi_cmd(1, cp);
- }
-}
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5136549005e7..a7e3ec9bba47 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -54,10 +54,11 @@
* | Misc | 0xd303 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe081 | |
+ * | Target Mode | 0xe089 | |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
+ * | Target Mode SRR | 0x11038 | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cb95b7b12051..b3265952c4be 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3503,7 +3503,6 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
-#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 145defc420f2..55d531c19e6b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -766,7 +766,7 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Globa function prototypes for multi-q */
extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
- struct qla_msix_entry *, int);
+ struct qla_msix_entry *);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6a2e1c7fd125..d395cbfe6802 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4369,6 +4369,7 @@ enable_82xx_npiv:
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
+ qlt_config_nvram_with_fw_version(vha);
qla2x00_get_resource_cnts(vha);
qla_init_iocb_limit(vha);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c4c6b5c6658c..a3971afc2dd1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -4467,32 +4467,6 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t
-qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- struct qla_qpair *qpair;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
-
- qpair = dev_id;
- if (!qpair) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = qpair->hw;
-
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- queue_work(ha->wq, &qpair->q_work);
-
- return IRQ_HANDLED;
-}
-
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -4505,7 +4479,6 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
- { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -4792,9 +4765,10 @@ free_irqs:
}
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
- struct qla_msix_entry *msix, int vector_type)
+ struct qla_msix_entry *msix)
{
- const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+ const struct qla_init_msix_entry *intr =
+ &msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 32eb0ce8b170..1f01576f044b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -253,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
/* Unlock mbx registers and wait for interrupt */
ql_dbg(ql_dbg_mbx, vha, 0x100f,
@@ -279,6 +280,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (chip_reset != ha->chip_reset) {
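A generic sketch of the stale-completion race the two added
reinit_completion() calls close (example_issue_mbx() is invented):

	/* Re-arm before handing the command to hardware, so a completion
	 * left signalled by a late interrupt from a previous, timed-out
	 * command cannot satisfy the next wait prematurely. */
	reinit_completion(&ha->mbx_intr_comp);
	example_issue_mbx(ha);
	wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);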
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8b71ac0b1d99..0abc47e72e0b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -899,9 +899,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- ha->flags.disable_msix_handshake ?
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix);
if (ret)
goto que_failed;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 316594aa40cc..42eb65a62f1f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
- kfree(item);
+ qla24xx_free_purex_item(item);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5ffd94586652..3a57f07d73f5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1862,12 +1862,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (qla2x00_chip_is_down(vha)) {
- req->outstanding_cmds[cnt] = NULL;
- sp->done(sp, res);
- continue;
- }
-
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1881,10 +1875,26 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
- cmd->aborted = 1;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->aborted = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ ha->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ ha->tgt.tgt_ops->free_cmd(cmd);
+ }
break;
case TYPE_TGT_TMCMD:
- /* Skip task management functions. */
+			/*
+			 * Currently, only the ABTS response is placed on
+			 * outstanding_cmds[].
+			 */
+ qlt_free_ul_mcmd(ha,
+ (struct qla_tgt_mgmt_cmd *) sp);
break;
default:
break;
@@ -3397,7 +3407,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!ha->wq)) {
ret = -ENOMEM;
goto probe_failed;
@@ -3444,13 +3454,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mqenable = 0;
if (ha->mqenable) {
- bool startit = false;
-
- if (QLA_TGT_MODE_ENABLED())
- startit = false;
-
- if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
- startit = true;
+ bool startit = !!(host->active_mode & MODE_INITIATOR);
/* Create start of day qpairs for Block MQ */
for (i = 0; i < ha->max_qpairs; i++)
@@ -5280,7 +5284,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
- queue_work(system_unbound_wq, &fcport->reg_work);
+ queue_work(system_dfl_wq, &fcport->reg_work);
}
static
@@ -7244,6 +7248,7 @@ qla2xxx_wake_dpc(struct scsi_qla_host *vha)
if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
wake_up_process(t);
}
+EXPORT_SYMBOL(qla2xxx_wake_dpc);
/*
* qla2x00_rst_aen
@@ -7886,11 +7891,6 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
- /* pci_restore_state() clears the saved_state flag of the device
- * save restored state which resets saved_state flag
- */
- pci_save_state(pdev);
-
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1e81582085e3..d772136984c9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,8 +104,6 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
-static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
@@ -136,20 +134,6 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
-static const char *prot_op_str(u32 prot_op)
-{
- switch (prot_op) {
- case TARGET_PROT_NORMAL: return "NORMAL";
- case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
- case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
- case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
- case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
- case TARGET_PROT_DIN_PASS: return "DIN_PASS";
- case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
- default: return "UNKNOWN";
- }
-}
-
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -226,6 +210,10 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
struct qla_tgt_sess_op *u;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
+ unsigned int add_cdb_len = 0;
+
+	/*
+	 * atio must be the last member of qla_tgt_sess_op so that the
+	 * additional CDB bytes (add_cdb_len) can extend past it.
+	 */
+	BUILD_BUG_ON(offsetof(struct qla_tgt_sess_op, atio) +
+		     sizeof(u->atio) != sizeof(*u));
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_async, vha, 0x502c,
@@ -234,12 +222,17 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
goto out_term;
}
- u = kzalloc(sizeof(*u), GFP_ATOMIC);
+ if (atio->u.raw.entry_type == ATIO_TYPE7 &&
+ atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)
+ add_cdb_len =
+ ((unsigned int) atio->u.isp24.fcp_cmnd.add_cdb_len) * 4;
+
+ u = kzalloc(sizeof(*u) + add_cdb_len, GFP_ATOMIC);
if (u == NULL)
goto out_term;
u->vha = vha;
- memcpy(&u->atio, atio, sizeof(*atio));
+ memcpy(&u->atio, atio, sizeof(*atio) + add_cdb_len);
INIT_LIST_HEAD(&u->cmd_list);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
@@ -252,7 +245,7 @@ out:
return;
out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked);
goto out;
}
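The reworked allocation sizes the tracking struct by the extra CDB bytes and copies them in one go, which is why the BUILD_BUG_ON above pins atio as the final member. A standalone sketch of the trailing-allocation idiom with invented stand-in types (fake_sess_op is not the driver's struct):

/*
 * Sketch: over-allocating a struct whose final member is followed by a
 * variable-length tail (extra CDB bytes).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_atio {
	unsigned char cdb[16];		/* fixed part of the CDB */
};

struct fake_sess_op {
	int vha_id;
	struct fake_atio atio;		/* must be the last member */
};

static struct fake_sess_op *queue_atio(const unsigned char *raw,
				       size_t raw_len)
{
	size_t add_len = raw_len > sizeof(struct fake_atio) ?
		raw_len - sizeof(struct fake_atio) : 0;
	struct fake_sess_op *u;

	/* Mirrors the BUILD_BUG_ON: the tail lands right after atio. */
	assert(offsetof(struct fake_sess_op, atio) +
	       sizeof(u->atio) == sizeof(*u));

	u = calloc(1, sizeof(*u) + add_len);
	if (!u)
		return NULL;
	memcpy(&u->atio, raw, sizeof(u->atio) + add_len);
	return u;
}

int main(void)
{
	unsigned char raw[16 + 8] = { [0] = 0x7f };	/* 8 extra bytes */
	struct fake_sess_op *u = queue_atio(raw, sizeof(raw));

	if (!u)
		return 1;
	printf("copied %zu bytes including tail, cdb[0]=%#x\n",
	       sizeof(raw), u->atio.cdb[0]);
	free(u);
	return 0;
}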
@@ -271,7 +264,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because of Abort\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
goto abort;
}
@@ -285,7 +278,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
} else {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
@@ -1909,6 +1902,10 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
* ABTS response. So, in it ID fields are reversed.
*/
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe082,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO for ABTS\n",
+ vha->vp_idx, le32_to_cpu(entry->exchange_addr_to_abort));
+
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->nport_handle = entry->nport_handle;
@@ -1987,8 +1984,12 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
cmd_lun = scsilun_to_int(
(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
- if (cmd_key == key && cmd_lun == lun)
+ if (cmd_key == key && cmd_lun == lun) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe085,
+ "qla_target(%d): tag %lld: aborted by TMR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
cmd->aborted = 1;
+ }
}
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
@@ -2017,7 +2018,6 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_hw_data *ha = mcmd->vha->hw;
int rc;
uint32_t tag;
- unsigned long flags;
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
@@ -2032,34 +2032,12 @@ static void qlt_do_tmr_work(struct work_struct *work)
mcmd->tmr_func, tag);
if (rc != 0) {
- spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
- switch (mcmd->tmr_func) {
- case QLA_TGT_ABTS:
- mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
- qlt_build_abts_resp_iocb(mcmd);
- break;
- case QLA_TGT_LUN_RESET:
- case QLA_TGT_CLEAR_TS:
- case QLA_TGT_ABORT_TS:
- case QLA_TGT_CLEAR_ACA:
- case QLA_TGT_TARGET_RESET:
- qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
- qla_sam_status);
- break;
-
- case QLA_TGT_ABORT_ALL:
- case QLA_TGT_NEXUS_LOSS_SESS:
- case QLA_TGT_NEXUS_LOSS:
- qlt_send_notify_ack(mcmd->qpair,
- &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
- break;
- }
- spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
-
ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
"qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
mcmd->vha->vp_idx, rc);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ mcmd->flags |= QLA24XX_MGMT_LLD_OWNED;
+ mcmd->fc_tm_rsp = FCP_TMF_FAILED;
+ qlt_xmit_tm_rsp(mcmd);
}
}
@@ -2247,6 +2225,21 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
EXPORT_SYMBOL(qlt_free_mcmd);
/*
+ * If the upper layer knows about this mgmt cmd, then call its ->free_cmd()
+ * callback, which will eventually call qlt_free_mcmd(). Otherwise, call
+ * qlt_free_mcmd() directly.
+ */
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd)
+{
+ if (!mcmd)
+ return;
+ if (mcmd->flags & QLA24XX_MGMT_LLD_OWNED)
+ qlt_free_mcmd(mcmd);
+ else
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+}
+
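qlt_free_ul_mcmd() picks the release path by ownership: a mgmt cmd flagged QLA24XX_MGMT_LLD_OWNED never reached the upper layer and is freed locally, while anything else must go back through ->free_mcmd() so the upper layer can unwind first. A compact userspace sketch of the ownership-bit dispatch (all names invented):

/*
 * Sketch: freeing through the owner. Objects the lower layer kept to
 * itself are freed directly; objects handed to an upper layer go back
 * through that layer's callback so its bookkeeping unwinds first.
 */
#include <stdio.h>
#include <stdlib.h>

#define LLD_OWNED 0x1	/* the upper layer never saw this object */

struct mgmt_cmd {
	unsigned int flags;
};

struct upper_ops {
	void (*free_mcmd)(struct mgmt_cmd *m);
};

static void lld_free_mcmd(struct mgmt_cmd *m)
{
	printf("lower layer frees %p\n", (void *)m);
	free(m);
}

static void ul_free_mcmd(struct mgmt_cmd *m)
{
	printf("upper layer drops its reference, then calls back down\n");
	lld_free_mcmd(m);
}

static void free_via_owner(const struct upper_ops *ops, struct mgmt_cmd *m)
{
	if (!m)
		return;
	if (m->flags & LLD_OWNED)
		lld_free_mcmd(m);	/* never left the lower layer */
	else
		ops->free_mcmd(m);	/* upper layer must unwind first */
}

int main(void)
{
	const struct upper_ops ops = { .free_mcmd = ul_free_mcmd };
	struct mgmt_cmd *a = calloc(1, sizeof(*a));
	struct mgmt_cmd *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->flags = LLD_OWNED;
	free_via_owner(&ops, a);	/* freed directly */
	free_via_owner(&ops, b);	/* routed via the upper layer */
	return 0;
}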
+/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
@@ -2338,12 +2331,12 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
mcmd->reset_count, qpair->chip_reset);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
- if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+ if (mcmd->flags & QLA24XX_MGMT_SEND_NACK) {
switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
case ELS_LOGO:
case ELS_PRLO:
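The switch from `==` to `&` just above matters because this patch adds a second flag bit (QLA24XX_MGMT_LLD_OWNED) that can be set alongside QLA24XX_MGMT_SEND_NACK; an equality test on a flags word silently stops matching as soon as any other bit joins in. A two-line demonstration (macro values invented):

/*
 * Sketch: testing a flag bit with == breaks the moment a second bit can
 * be set on the same word; test membership with & instead.
 */
#include <stdio.h>

#define MGMT_SEND_NACK	0x1
#define MGMT_LLD_OWNED	0x2	/* the new bit this patch introduces */

int main(void)
{
	unsigned int flags = MGMT_SEND_NACK | MGMT_LLD_OWNED;

	/* Old test: false even though SEND_NACK is set. */
	printf("==: %s\n", flags == MGMT_SEND_NACK ? "match" : "MISSED");
	/* New test: true whenever the bit is present. */
	printf("&:  %s\n", (flags & MGMT_SEND_NACK) ? "match" : "missed");
	return 0;
}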
@@ -2376,7 +2369,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* qlt_xmit_tm_rsp() returns here..
*/
if (free_mcmd)
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
@@ -2443,7 +2436,7 @@ out_err:
return -1;
}
-static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
@@ -3218,12 +3211,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
uint32_t full_req_cnt = 0;
unsigned long flags = 0;
int res;
-
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- cmd->state = QLA_TGT_STATE_PROCESSED;
- return 0;
- }
+ int pre_xmit_res;
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
@@ -3231,33 +3219,43 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
&cmd->se_cmd, qpair->id);
- res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ pre_xmit_res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0)) {
- return res;
- }
+ /*
+ * Check pre_xmit_res later because we want to check other errors
+ * first.
+ */
+
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe101,
+ "qla_target(%d): tag %lld: skipping send response for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return 0;
+ }
+
+ /* Check for errors from qlt_pre_xmit_response(). */
+ res = pre_xmit_res;
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
if (xmit_type == QLA_TGT_XMIT_STATUS)
qpair->tgt_counters.core_qla_snd_status++;
else
qpair->tgt_counters.core_qla_que_buf++;
- if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
- "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
- res = 0;
- goto out_unmap_unlock;
- }
-
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, full_req_cnt);
if (unlikely(res))
@@ -3372,36 +3370,50 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
struct qla_tgt_prm prm;
unsigned long flags = 0;
int res = 0;
+ int pci_map_res;
struct qla_qpair *qpair = cmd->qpair;
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
+
memset(&prm, 0, sizeof(prm));
prm.cmd = cmd;
prm.tgt = tgt;
prm.sg = NULL;
prm.req_cnt = 1;
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
+ /* Calculate number of entries and segments required */
+ pci_map_res = qlt_pci_map_calc_cnt(&prm);
+ /*
+ * Check pci_map_res later because we want to check other errors first.
+ */
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe102,
+ "qla_target(%d): tag %lld: skipping data-out for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
cmd->aborted = 1;
cmd->write_data_transferred = 0;
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
- ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
- "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
}
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
+ /* Check for errors from qlt_pci_map_calc_cnt(). */
+ if (unlikely(pci_map_res != 0)) {
+ res = -EAGAIN;
+ goto out_unlock_free_unmap;
+ }
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
if (res != 0)
@@ -3438,6 +3450,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return res;
out_unlock_free_unmap:
+ cmd->jiffies_at_hw_st_entry = 0;
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -3457,7 +3470,6 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t *ep = &sts->expected_dif[0];
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
- unsigned long flags;
struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3528,16 +3540,14 @@ out:
case QLA_TGT_STATE_NEED_DATA:
/* handle_data will load DIF error code */
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
break;
default:
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ if (cmd->sent_term_exchg) {
vha->hw->tgt.tgt_ops->free_cmd(cmd);
break;
}
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
ascq);
@@ -3611,6 +3621,62 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
}
/*
+ * Handle an SRR that was previously associated with a command when the
+ * command has been aborted or otherwise cannot process the SRR.
+ *
+ * If reject is true, then attempt to reject the SRR. Otherwise abort the
+ * immediate notify exchange.
+ */
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt_srr *srr = cmd->srr;
+
+ if (srr->imm_ntfy_recvd) {
+ if (reject)
+ srr->reject = true;
+ else
+ srr->aborted = true;
+
+ if (srr->ctio_recvd) {
+ /*
+ * The SRR should already be scheduled for processing,
+ * and the SRR processing code should see that the cmd
+ * has been aborted and take appropriate action. In
+ * addition, the cmd refcount should have been
+ * incremented, preventing the cmd from being freed
+ * until SRR processing is done.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102e,
+ "qla_target(%d): tag %lld: %s: SRR already scheduled\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ } else {
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+			/* Schedule processing for the SRR immediate notify. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102f,
+ "qla_target(%d): tag %lld: %s: schedule SRR %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__,
+ reject ? "reject" : "abort");
+ cmd->srr = NULL;
+ srr->cmd = NULL;
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11030,
+ "qla_target(%d): tag %lld: %s: no IMM SRR; free SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ cmd->srr = NULL;
+ kfree(srr);
+ }
+}
+EXPORT_SYMBOL(qlt_srr_abort);
+
+/*
 * If hardware_lock held on entry, might drop it, then reacquire
* This function sends the appropriate CTIO to ISP 2xxx or 24xx
*/
@@ -3618,43 +3684,61 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
{
- struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
- struct qla_hw_data *ha = vha->hw;
- request_t *pkt;
- int ret = 0;
+ struct scsi_qla_host *vha;
+ uint16_t loop_id;
uint16_t temp;
- ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
-
- if (cmd)
+ if (cmd) {
vha = cmd->vha;
+ loop_id = cmd->loop_id;
+ } else {
+ port_id_t id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
+ struct qla_hw_data *ha;
+ struct fc_port *sess;
+ unsigned long flags;
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
- if (pkt == NULL) {
+ vha = qpair->vha;
+ ha = vha->hw;
+
+ /*
+ * CTIO7_NHANDLE_UNRECOGNIZED works when aborting an idle
+ * command but not when aborting a command with an active CTIO
+ * exchange.
+ */
+ loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
+ if (sess)
+ loop_id = sess->loop_id;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ if (cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %lld: Sending TERM EXCH CTIO state %d cmd_sent_to_fw %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO (no cmd)\n",
+ vha->vp_idx, le32_to_cpu(atio->u.isp24.exchange_addr));
+ }
+
+ ctio24 = qla2x00_alloc_iocbs_ready(qpair, NULL);
+ if (!ctio24) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
"request packet\n", vha->vp_idx, __func__);
return -ENOMEM;
}
- if (cmd != NULL) {
- if (cmd->state < QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_tgt, vha, 0xe051,
- "qla_target(%d): Terminating cmd %p with "
- "incorrect state %d\n", vha->vp_idx, cmd,
- cmd->state);
- } else
- ret = 1;
- }
-
qpair->tgt_counters.num_term_xchg_sent++;
- pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
+ ctio24->entry_count = 1;
+ ctio24->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio24->nport_handle = cpu_to_le16(loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3671,12 +3755,25 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
- return ret;
+ return 0;
}
-static void qlt_send_term_exchange(struct qla_qpair *qpair,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
- int ul_abort)
+/*
+ * Aborting a command that is active in the FW (i.e. cmd->cmd_sent_to_fw == 1)
+ * will usually trigger the FW to send a completion CTIO with error status,
+ * and the driver will then call the ->handle_data() or ->free_cmd() callbacks.
+ * This can be used to clear a command that is stuck in the FW, unless
+ * something more serious is wrong.
+ *
+ * Aborting a command that is not active in the FW (i.e.
+ * cmd->cmd_sent_to_fw == 0) will not directly trigger any callbacks. Instead,
+ * when the target mode midlevel calls qlt_rdy_to_xfer() or
+ * qlt_xmit_response(), the driver will see that the cmd has been aborted and
+ * call the appropriate callback immediately without performing the requested
+ * operation.
+ */
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
struct scsi_qla_host *vha;
unsigned long flags = 0;
@@ -3700,10 +3797,14 @@ static void qlt_send_term_exchange(struct qla_qpair *qpair,
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && !ul_abort && !cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ if (cmd) {
+ /*
+ * Set this even if -ENOMEM above, since term exchange will be
+ * sent eventually...
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
}
if (!ha_locked)
@@ -3711,6 +3812,7 @@ done:
return;
}
+EXPORT_SYMBOL(qlt_send_term_exchange);
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
@@ -3761,38 +3863,35 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt *tgt = cmd->tgt;
- struct scsi_qla_host *vha = tgt->vha;
- struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_qpair *qpair = cmd->qpair;
unsigned long flags;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p "
- "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
- se_cmd->tag);
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * It's normal to see 2 calls in this path:
- * 1) XFER Rdy completion + CMD_T_ABORT
- * 2) TCM TMR - drain_state_list
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
- "multiple abort. %p transport_state %x, t_state %x, "
- "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
- cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return -EIO;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): tag %lld: cmd being aborted (state %d) %s; %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw ? "sent to fw" : "not sent to fw",
+ cmd->aborted ? "aborted" : "not aborted");
+
+ if (cmd->state != QLA_TGT_STATE_DONE && !cmd->sent_term_exchg) {
+ if (!qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset) {
+ /*
+ * Chip was reset; just pretend that we sent the term
+ * exchange.
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ } else {
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
}
- cmd->aborted = 1;
- cmd->trc_flags |= TRC_ABORT;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3812,54 +3911,99 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
qlt_decr_num_pend_cmds(cmd->vha);
BUG_ON(cmd->sg_mapped);
+ if (unlikely(cmd->free_sg)) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ if (unlikely(cmd->srr))
+ qlt_srr_abort(cmd, false);
+
+ if (unlikely(cmd->aborted ||
+ (cmd->trc_flags & (TRC_CTIO_STRANGE | TRC_CTIO_ERR |
+ TRC_SRR_CTIO | TRC_SRR_IMM)))) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xe086,
+ "qla_target(%d): tag %lld: free cmd (trc_flags %x, aborted %u, sent_term_exchg %u, rsp_sent %u)\n",
+ cmd->vha->vp_idx, cmd->se_cmd.tag,
+ cmd->trc_flags, cmd->aborted, cmd->sent_term_exchg,
+ cmd->rsp_sent);
+ }
+
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
+
cmd->jiffies_at_free = get_jiffies_64();
if (!sess || !sess->se_sess) {
WARN_ON(1);
return;
}
- cmd->jiffies_at_free = get_jiffies_64();
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
/*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * Process a CTIO response for a SCSI command that failed due to SRR.
+ *
+ * qpair->qp_lock_ptr supposed to be held on entry
*/
-static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
- struct qla_tgt_cmd *cmd, uint32_t status)
+static int qlt_prepare_srr_ctio(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd)
{
- int term = 0;
- struct scsi_qla_host *vha = qpair->vha;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
- if (cmd->se_cmd.prot_op)
- ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
- "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x] op %#x/%s",
- cmd->lba, cmd->lba,
- cmd->num_blks, &cmd->se_cmd,
- cmd->atio.u.isp24.exchange_addr,
- cmd->se_cmd.prot_op,
- prot_op_str(cmd->se_cmd.prot_op));
-
- if (ctio != NULL) {
- struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
-
- term = !(c->flags &
- cpu_to_le16(OF_TERM_EXCH));
- } else
- term = 1;
+ cmd->trc_flags |= TRC_SRR_CTIO;
- if (term)
- qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
+ srr = cmd->srr;
+ if (srr != NULL) {
+ /* qlt_prepare_srr_imm() was called first. */
- return term;
-}
+ WARN_ON(srr->ctio_recvd);
+ WARN_ON(!srr->imm_ntfy_recvd);
+ if (vha->hw->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11037,
+ "qla_target(%d): tag %lld: unable to get cmd ref for SRR processing\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_srr_abort(cmd, true);
+ return -ESHUTDOWN;
+ }
+
+ srr->ctio_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100f,
+ "qla_target(%d): tag %lld: Scheduling SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ return 0;
+ }
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ return -ENOMEM;
+
+ /* Expect qlt_prepare_srr_imm() to be called. */
+ srr->ctio_recvd = true;
+ srr->cmd = cmd;
+ srr->reset_count = cmd->reset_count;
+ cmd->srr = srr;
+ return 0;
+}
/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint8_t cmd_type,
+ const void *ctio)
{
void *cmd = NULL;
struct req_que *req;
@@ -3882,29 +4026,97 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
h &= QLA_CMD_HANDLE_MASK;
- if (h != QLA_TGT_NULL_HANDLE) {
- if (unlikely(h >= req->num_outstanding_cmds)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe052,
- "qla_target(%d): Wrong handle %x received\n",
- vha->vp_idx, handle);
- return NULL;
- }
-
- cmd = req->outstanding_cmds[h];
- if (unlikely(cmd == NULL)) {
- ql_dbg(ql_dbg_async, vha, 0xe053,
- "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
- vha->vp_idx, handle, req->id, rsp->id);
- return NULL;
- }
- req->outstanding_cmds[h] = NULL;
- } else if (ctio != NULL) {
+ if (h == QLA_TGT_NULL_HANDLE) {
/* We can't get loop ID from CTIO7 */
ql_dbg(ql_dbg_tgt, vha, 0xe054,
"qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
"support NULL handles\n", vha->vp_idx);
return NULL;
}
+ if (unlikely(h >= req->num_outstanding_cmds)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+
+ /*
+ * We passed a numeric handle for a cmd to the hardware, and the
+ * hardware passed the handle back to us. Look up the associated cmd,
+ * and validate that the cmd_type and exchange address match what the
+ * caller expects. This guards against buggy HBA firmware that returns
+ * the same CTIO multiple times.
+ */
+
+ cmd = req->outstanding_cmds[h];
+
+ if (unlikely(cmd == NULL)) {
+ if (cmd_type == TYPE_TGT_CMD) {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h,
+ handle, req->id, rsp->id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, handle, req->id, rsp->id);
+ }
+ return NULL;
+ }
+
+ if (unlikely(((srb_t *)cmd)->cmd_type != cmd_type)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe087,
+ "qla_target(%d): handle %x: cmd detached; ignoring CTIO (cmd_type mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+
+ switch (cmd_type) {
+ case TYPE_TGT_CMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_cmd *)cmd)->
+ atio.u.isp24.exchange_addr;
+
+ BUILD_BUG_ON(offsetof(struct ctio7_from_24xx,
+ exchange_address) !=
+ offsetof(struct ctio_crc_from_fw,
+ exchange_address));
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe088,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h);
+ return NULL;
+ }
+ break;
+ }
+
+ case TYPE_TGT_TMCMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct abts_resp_from_24xx_fw *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_mgmt_cmd *)cmd)->
+ orig_iocb.abts.exchange_address;
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe089,
+ "qla_target(%d): ABTS: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+ break;
+ }
+ }
+
+ req->outstanding_cmds[h] = NULL;
return cmd;
}
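qlt_ctio_to_cmd() now treats the handle coming back from the firmware as untrusted: the slot is cleared only after the stored command's type and exchange address both match the CTIO, so a duplicated or stale completion is dropped instead of completing (and freeing) an unrelated command that reused the slot. A standalone sketch of the validate-then-claim lookup (types and sizes invented):

/*
 * Sketch: claiming a completion handle only after validating that the
 * stored entry matches what the completion claims to describe.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_HANDLES 8

struct cmd {
	uint8_t type;		/* what kind of object fills the slot */
	uint32_t exchange;	/* identity assigned when it was issued */
};

static struct cmd *slots[NUM_HANDLES];

static struct cmd *claim(uint32_t h, uint8_t type, uint32_t exchange)
{
	struct cmd *c;

	if (h >= NUM_HANDLES)
		return NULL;	/* bogus handle from the firmware */
	c = slots[h];
	if (!c)
		return NULL;	/* already completed: stale/duplicate */
	if (c->type != type || c->exchange != exchange)
		return NULL;	/* slot was reused by another command */
	slots[h] = NULL;	/* validated: claim exactly once */
	return c;
}

int main(void)
{
	struct cmd c = { .type = 1, .exchange = 0xabcd };

	slots[3] = &c;
	printf("first completion:     %p\n", (void *)claim(3, 1, 0xabcd));
	printf("duplicate completion: %p\n", (void *)claim(3, 1, 0xabcd));
	return 0;
}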
@@ -3913,12 +4125,13 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint32_t status,
+ struct ctio7_from_24xx *ctio)
{
struct qla_hw_data *ha = vha->hw;
- struct se_cmd *se_cmd;
struct qla_tgt_cmd *cmd;
struct qla_qpair *qpair = rsp->qpair;
+ uint16_t ctio_flags;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
@@ -3930,45 +4143,92 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
- if (cmd == NULL)
- return;
+ ctio_flags = le16_to_cpu(ctio->flags);
+
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, TYPE_TGT_CMD, ctio);
+ if (unlikely(cmd == NULL)) {
+ if ((handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE &&
+ (ctio_flags & 0xe1ff) == (CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE)) {
+ u32 tag = le32_to_cpu(ctio->exchange_address);
- if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
- cmd->sess) {
- qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
- (struct ctio7_from_24xx *)ctio);
+ if (status == CTIO_SUCCESS)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe083,
+ "qla_target(%d): tag %u: term exchange successful\n",
+ vha->vp_idx, tag);
+ else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe084,
+ "qla_target(%d): tag %u: term exchange failed; status = 0x%x\n",
+ vha->vp_idx, tag, status);
+ }
+ return;
}
- se_cmd = &cmd->se_cmd;
+ if ((ctio_flags & CTIO7_FLAGS_DATA_OUT) && cmd->sess)
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, ctio);
+
cmd->cmd_sent_to_fw = 0;
qlt_unmap_sg(vha, cmd);
if (unlikely(status != CTIO_SUCCESS)) {
+ u8 op = cmd->cdb ? cmd->cdb[0] : 0;
+ bool term_exchg = false;
+
+ /*
+ * If the hardware terminated the exchange, then we don't need
+ * to send an explicit term exchange message.
+ */
+ if (ctio_flags & OF_TERM_EXCH) {
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ }
+
switch (status & 0xFFFF) {
case CTIO_INVALID_RX_ID:
+ term_exchg = true;
if (printk_ratelimit())
dev_info(&vha->hw->pdev->dev,
- "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
- vha->vp_idx, cmd->atio.u.isp24.attr,
+ "qla_target(%d): tag %lld, op %x: CTIO with INVALID_RX_ID status 0x%x received (state %d, port %8phC, LUN %lld, ATIO attr %x, CTIO Flags %x|%x)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->atio.u.isp24.attr,
((cmd->ctio_flags >> 9) & 0xf),
cmd->ctio_flags);
-
break;
+
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
- /* driver request abort via Terminate exchange */
+ term_exchg = true;
+ fallthrough;
case CTIO_TIMEOUT:
- /* They are OK */
+ {
+ const char *status_str;
+
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ status_str = "LIP_RESET";
+ break;
+ case CTIO_TARGET_RESET:
+ status_str = "TARGET_RESET";
+ break;
+ case CTIO_ABORTED:
+ status_str = "ABORTED";
+ break;
+ case CTIO_TIMEOUT:
+ default:
+ status_str = "TIMEOUT";
+ break;
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
- "qla_target(%d): CTIO with "
- "status %#x received, state %x, se_cmd %p, "
- "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
- "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
- status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status_str, status, cmd->state,
+ cmd->sess->port_name, cmd->unpacked_lun);
break;
+ }
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
@@ -3977,11 +4237,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
- "qla_target(%d): CTIO with %s status %x "
- "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
- status, cmd->state, se_cmd);
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
+ term_exchg = true;
if (logged_out && cmd->sess) {
/*
* Session is already logged out, but we need
@@ -3996,18 +4258,30 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
}
break;
}
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100e,
+ "qla_target(%d): tag %lld, op %x: CTIO with SRR status 0x%x received (state %d, port %8phC, LUN %lld, bufflen %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->bufflen);
+
+ if (qlt_prepare_srr_ctio(qpair, cmd) == 0)
+ return;
+ break;
+
case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x "
- "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
- "expect_dif[0x%llx]\n",
- vha->vp_idx, status, cmd->state, se_cmd,
+ "qla_target(%d): tag %lld, op %x: CTIO with DIF_ERROR status 0x%x received (state %d, port %8phC, LUN %lld, actual_dif[0x%llx] expect_dif[0x%llx])\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- qlt_handle_dif_error(qpair, cmd, ctio);
+ qlt_handle_dif_error(qpair, cmd, crc);
return;
}
@@ -4016,51 +4290,72 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
case CTIO_FAST_INVALID_REQ:
case CTIO_FAST_SPI_ERR:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with EDIF error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
}
+ cmd->trc_flags |= TRC_CTIO_ERR;
- /* "cmd->aborted" means
- * cmd is already aborted/terminated, we don't
- * need to terminate again. The exchange is already
- * cleaned up/freed at FW level. Just cleanup at driver
- * level.
+ /*
+ * In state QLA_TGT_STATE_NEED_DATA the failed CTIO was for
+ * Data-Out, so either abort the exchange or try sending check
+ * condition with sense data depending on the severity of
+ * the error. In state QLA_TGT_STATE_PROCESSED the failed CTIO
+ * was for status (and possibly Data-In), so don't try sending
+ * an error status again in that case (if the error was for
+ * Data-In with status, we could try sending status without
+ * Data-In, but we don't do that currently).
*/
- if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (!cmd->aborted)) {
- cmd->trc_flags |= TRC_CTIO_ERR;
- if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
- return;
- }
+ if (!cmd->sent_term_exchg &&
+ (term_exchg || cmd->state != QLA_TGT_STATE_NEED_DATA))
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+
+ if (unlikely(cmd->srr != NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11031,
+ "qla_target(%d): tag %lld, op %x: expected CTIO with SRR status; got status 0x%x: state %d, bufflen %d\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ cmd->cdb ? cmd->cdb[0] : 0, status, cmd->state,
+ cmd->bufflen);
+ qlt_srr_abort(cmd, true);
}
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
+
+ if (likely(status == CTIO_SUCCESS))
+ cmd->rsp_sent = 1;
+
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
if (status == CTIO_SUCCESS)
cmd->write_data_transferred = 1;
+ cmd->jiffies_at_hw_st_entry = 0;
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->aborted) {
cmd->trc_flags |= TRC_CTIO_ABORTED;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
- "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
+ "qla_target(%d): tag %lld: Aborted command finished\n",
+ vha->vp_idx, cmd->se_cmd.tag);
} else {
cmd->trc_flags |= TRC_CTIO_STRANGE;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
- "qla_target(%d): A command in state (%d) should "
- "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ "qla_target(%d): tag %lld: A command in state (%d) should not return a CTIO complete\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
}
if (unlikely(status != CTIO_SUCCESS) &&
@@ -4113,7 +4408,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
- unsigned char *cdb;
unsigned long flags;
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
@@ -4129,8 +4423,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
goto out_term;
}
- spin_lock_init(&cmd->cmd_lock);
- cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
@@ -4148,7 +4440,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
atio->u.isp24.fcp_cmnd.task_attr);
data_length = get_datalen_for_atio(atio);
- ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cmd->cdb, data_length,
fcp_task_attr, data_dir, bidi);
if (ret != 0)
goto out_term;
@@ -4166,9 +4458,14 @@ out_term:
*/
cmd->trc_flags |= TRC_DO_WORK_ERR;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1);
qlt_decr_num_pend_cmds(vha);
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -4292,18 +4589,43 @@ out:
cmd->se_cmd.cpuid = h->cpuid;
}
+/*
+ * Safely make a fixed-length copy of a variable-length atio by truncating the
+ * CDB if necessary.
+ */
+static void memcpy_atio(struct atio_from_isp *dst,
+ const struct atio_from_isp *src)
+{
+ int len;
+
+ memcpy(dst, src, sizeof(*dst));
+
+ /*
+ * If the CDB was truncated, prevent get_datalen_for_atio() from
+ * accessing invalid memory.
+ */
+ len = src->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(len != 0)) {
+ dst->u.isp24.fcp_cmnd.add_cdb_len = 0;
+ memcpy(&dst->u.isp24.fcp_cmnd.add_cdb[0],
+ &src->u.isp24.fcp_cmnd.add_cdb[len * 4],
+ 4);
+ }
+}
+
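memcpy_atio() must cope with a source frame that is longer than the fixed-size destination: the extra CDB words are dropped, add_cdb_len is zeroed in the copy, and the 4-byte data-length field that follows the variable CDB on the wire is pulled forward so get_datalen_for_atio() stays inside the copy. A runnable sketch with an invented layout (the real ATIO differs):

/*
 * Sketch: fixed-size copy of a variable-length command frame. The
 * 4-byte data-length field follows the variable CDB on the wire, so a
 * truncating copy must pull it forward into the fixed slot and clear
 * the extra-length marker, or later parsing reads past the copy.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_cmnd {
	unsigned char add_cdb_len;	/* extra CDB length, 4-byte words */
	unsigned char cdb[16];
	unsigned char add_cdb[4];	/* extra CDB, then data length */
};

static void copy_truncated(struct fake_cmnd *dst, const unsigned char *wire)
{
	int len = ((const struct fake_cmnd *)wire)->add_cdb_len;

	memcpy(dst, wire, sizeof(*dst));
	if (len != 0) {
		dst->add_cdb_len = 0;	/* the copy holds no extra bytes */
		/* Pull the data-length field into the fixed slot. */
		memcpy(dst->add_cdb,
		       wire + offsetof(struct fake_cmnd, add_cdb) + len * 4,
		       4);
	}
}

int main(void)
{
	/* Wire frame longer than the struct: 8 extra CDB bytes. */
	unsigned char wire[sizeof(struct fake_cmnd) + 8] = {0};
	struct fake_cmnd copy;

	wire[0] = 2;				/* 2 * 4 = 8 extra bytes */
	wire[offsetof(struct fake_cmnd, add_cdb) + 8] = 0x42;
	copy_truncated(&copy, wire);
	printf("add_cdb_len=%d data_len_byte=%#x\n",
	       copy.add_cdb_len, copy.add_cdb[0]);
	return 0;
}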
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
struct qla_tgt_cmd *cmd;
+ int add_cdb_len;
cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
if (!cmd)
return NULL;
cmd->cmd_type = TYPE_TGT_CMD;
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
INIT_LIST_HEAD(&cmd->sess_cmd_list);
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -4323,6 +4645,29 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->vp_idx = vha->vp_idx;
cmd->edif = sess->edif.enable;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+
+ /*
+ * NOTE: memcpy_atio() set cmd->atio.u.isp24.fcp_cmnd.add_cdb_len to 0,
+ * so use the original value here.
+ */
+ add_cdb_len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(add_cdb_len != 0)) {
+ int cdb_len = 16 + add_cdb_len * 4;
+ u8 *cdb;
+
+ cdb = kmalloc(cdb_len, GFP_ATOMIC);
+ if (unlikely(!cdb)) {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ return NULL;
+ }
+ /* CAUTION: copy CDB from atio not cmd->atio */
+ memcpy(cdb, atio->u.isp24.fcp_cmnd.cdb, cdb_len);
+ cmd->cdb = cdb;
+ cmd->cdb_len = cdb_len;
+ }
+
return cmd;
}
@@ -4900,6 +5245,863 @@ out:
}
/*
+ * Return true if the HBA firmware version is known to have bugs that
+ * prevent Sequence Level Error Recovery (SLER) / Sequence Retransmission
+ * Request (SRR) from working.
+ *
+ * Some bad versions were identified by testing and others come from the
+ * "Marvell Fibre Channel Firmware Release Notes".
+ */
+static bool qlt_has_sler_fw_bug(struct qla_hw_data *ha)
+{
+ bool has_sler_fw_bug = false;
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ /*
+ * In the fw release notes:
+ * ER147301 was added to v9.05.00 causing SLER regressions
+ * FCD-259 was fixed in v9.08.00
+ * FCD-371 was fixed in v9.08.00
+ * FCD-1183 was fixed in v9.09.00
+ *
+ * QLE2694L (ISP2071) known bad firmware (tested):
+ * 9.06.02
+ * 9.07.00
+ * 9.08.02
+ * SRRs trigger hundreds of bogus entries in the response
+ * queue and various other problems.
+ *
+ * QLE2694L known good firmware (tested):
+ * 8.08.05
+ * 9.09.00
+ *
+ * Suspected bad firmware (not confirmed by testing):
+ * v9.05.xx
+ *
+	 * Unknown firmware:
+ * 9.00.00 - 9.04.xx
+ */
+ if (ha->fw_major_version == 9 &&
+ ha->fw_minor_version >= 5 &&
+ ha->fw_minor_version <= 8)
+ has_sler_fw_bug = true;
+ }
+
+ return has_sler_fw_bug;
+}
+
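The whole quirk test reduces to a major/minor window: major 9, minors 5 through 8 inclusive. A trivial sketch of the gate (helper name invented; version data taken from the comment above):

/* Sketch: gating a workaround on a firmware version window. */
#include <stdbool.h>
#include <stdio.h>

static bool fw_has_sler_bug(int major, int minor)
{
	/* 9.05.xx through 9.08.xx are bad; 9.09.00 and 8.x tested good. */
	return major == 9 && minor >= 5 && minor <= 8;
}

int main(void)
{
	printf("9.06 -> %d\n", fw_has_sler_bug(9, 6));	/* 1: avoid SRR */
	printf("9.09 -> %d\n", fw_has_sler_bug(9, 9));	/* 0 */
	printf("8.08 -> %d\n", fw_has_sler_bug(8, 8));	/* 0 */
	return 0;
}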
+/*
+ * Return true and print a message if the HA has been reset since the SRR
+ * immediate notify was received; else return false.
+ */
+static bool qlt_srr_is_chip_reset(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct qla_tgt_srr *srr)
+{
+ if (!vha->flags.online ||
+ !qpair->fw_started ||
+ srr->reset_count != qpair->chip_reset) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100d,
+ "qla_target(%d): chip reset; discarding IMM SRR\n",
+ vha->vp_idx);
+ return true;
+ }
+ return false;
+}
+
+/* Find and return the command associated with a SRR immediate notify. */
+static struct qla_tgt_cmd *qlt_srr_to_cmd(struct scsi_qla_host *vha,
+ const struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct fc_port *sess;
+ struct qla_tgt_cmd *cmd;
+ uint32_t tag = le32_to_cpu(iocb->u.isp24.exchange_address);
+ uint16_t loop_id;
+ be_id_t s_id;
+ unsigned long flags;
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11009,
+ "qla_target(%d): IMM SRR with unknown exchange address; reject SRR\n",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ s_id.domain = iocb->u.isp24.port_id[2];
+ s_id.area = iocb->u.isp24.port_id[1];
+ s_id.al_pa = iocb->u.isp24.port_id[0];
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess)
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (!sess || sess->deleted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100a,
+ "qla_target(%d): could not find session for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return NULL;
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, tag);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100b,
+ "qla_target(%d): could not find cmd for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ } else {
+ u16 srr_ox_id = le16_to_cpu(iocb->u.isp24.srr_ox_id);
+ u16 cmd_ox_id = be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ if (srr_ox_id != cmd_ox_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100c,
+ "qla_target(%d): tag %lld: IMM SRR: srr_ox_id[%04x] != cmd_ox_id[%04x]; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ srr_ox_id, cmd_ox_id);
+ cmd = NULL;
+ }
+ }
+
+ return cmd;
+}
+
+/*
+ * Handle an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
+ * for the affected command.
+ *
+ * This may be called a second time for the same immediate notify SRR if
+ * CTIO_SRR_RECEIVED is never received and qlt_srr_abort() is called.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr *srr)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
+ struct qla_tgt_cmd *cmd;
+ uint8_t srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL;
+
+ /* handle qlt_srr_abort() */
+ if (srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11004,
+ "qla_target(%d): IMM SRR: terminating SRR for aborted cmd\n",
+ vha->vp_idx);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ return;
+ }
+ if (srr->reject) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ goto out_reject;
+ }
+
+ /* Find the command associated with the SRR. */
+ cmd = qlt_srr_to_cmd(vha, &srr->imm_ntfy);
+ if (cmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID;
+ goto out_reject;
+ }
+
+ if (ha->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11038,
+ "qla_target(%d): IMM SRR: unable to get cmd ref; rejecting SRR\n",
+ vha->vp_idx);
+ cmd = NULL;
+ goto out_reject;
+ }
+
+ qpair = cmd->qpair;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ if (cmd->reset_count != srr->reset_count) {
+ /* force a miscompare */
+ srr->reset_count = qpair->chip_reset ^ 1;
+ }
+ if (qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ kfree(srr);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11001,
+ "qla_target(%d): tag %lld, op %x: received IMM SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->cdb ? cmd->cdb[0] : 0);
+
+ cmd->trc_flags |= TRC_SRR_IMM;
+
+ if (cmd->srr != NULL) {
+ if (cmd->srr->imm_ntfy_recvd) {
+ /*
+ * Received another immediate notify SRR message for
+ * this command before the previous one could be processed
+ * (not expected to happen).
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11006,
+ "qla_target(%d): tag %lld: received multiple IMM SRR; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* qlt_prepare_srr_ctio() was called first. */
+ WARN_ON(!cmd->srr->ctio_recvd);
+
+ /*
+ * The immediate notify and CTIO handlers both allocated
+ * separate srr structs; combine them.
+ */
+ memcpy(&cmd->srr->imm_ntfy, &srr->imm_ntfy,
+ sizeof(srr->imm_ntfy));
+ kfree(srr);
+ srr = cmd->srr;
+ srr->imm_ntfy_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11002,
+ "qla_target(%d): tag %lld: schedule SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ /*
+ * Already running the work function; no need to schedule
+ * tgt->srr_work.
+ */
+ spin_unlock(&tgt->srr_lock);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ /* return with cmd refcount incremented */
+ return;
+ }
+
+ /* The CTIO SRR for this command has not yet been received. */
+
+ if (cmd->sent_term_exchg) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11007,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+ }
+
+ /* If not expecting a CTIO, then reject IMM SRR. */
+ if (!cmd->cmd_sent_to_fw) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11008,
+ "qla_target(%d): tag %lld: IMM SRR but !cmd_sent_to_fw (state %d); reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* Expect qlt_prepare_srr_ctio() to be called. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11003,
+ "qla_target(%d): tag %lld: wait for CTIO SRR (state %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ srr->cmd = cmd;
+ cmd->srr = srr;
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+
+out_reject:
+ qpair = vha->hw->base_qpair;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ if (!qlt_srr_is_chip_reset(vha, qpair, srr))
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ srr_explain);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ kfree(srr);
+}
+
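qlt_prepare_srr_ctio() and qlt_handle_srr_imm() together form a rendezvous: the SRR is only processed once both the CTIO half and the immediate notify half have arrived, and either may arrive first. Whichever lands first allocates the tracking struct and parks it on the command; the second merges into it and schedules the work. A single-threaded sketch of the two-arrival pattern (the driver's locking, refcounting, and workqueue handoff are omitted):

/*
 * Sketch: a two-event rendezvous. Either arrival order works: the first
 * event allocates and parks state on the command, the second merges
 * into it and fires the processing step.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct srr_state {
	bool imm_ntfy_recvd;
	bool ctio_recvd;
};

struct cmd {
	struct srr_state *srr;
};

static void process_srr(struct cmd *c)
{
	printf("both halves present; processing SRR\n");
	free(c->srr);
	c->srr = NULL;
}

static void srr_half_arrived(struct cmd *c, bool is_imm)
{
	if (!c->srr) {			/* first arrival: park state */
		c->srr = calloc(1, sizeof(*c->srr));
		if (!c->srr)
			return;
	}
	if (is_imm)
		c->srr->imm_ntfy_recvd = true;
	else
		c->srr->ctio_recvd = true;
	if (c->srr->imm_ntfy_recvd && c->srr->ctio_recvd)
		process_srr(c);		/* second arrival: rendezvous */
}

int main(void)
{
	struct cmd a = {0}, b = {0};

	srr_half_arrived(&a, true);	/* IMM first, then CTIO */
	srr_half_arrived(&a, false);
	srr_half_arrived(&b, false);	/* CTIO first, then IMM */
	srr_half_arrived(&b, true);
	return 0;
}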
+/*
+ * Handle an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
+ * for the affected command.
+ *
+ * ha->hardware_lock supposed to be held on entry
+ */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
+
+ ql_log(ql_log_warn, vha, 0x11000, "qla_target(%d): received IMM SRR\n",
+ vha->vp_idx);
+
+ /*
+ * Need cmd->qpair->qp_lock_ptr, but have ha->hardware_lock. Defer
+ * processing to a workqueue so that the right lock can be acquired
+ * safely.
+ */
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ goto out_reject;
+
+ memcpy(&srr->imm_ntfy, iocb, sizeof(srr->imm_ntfy));
+ srr->imm_ntfy_recvd = true;
+ srr->reset_count = vha->hw->base_qpair->chip_reset;
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ /* resume processing in qlt_handle_srr_imm() */
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha->hw->base_qpair, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * If possible, undo the effect of qlt_set_data_offset() and restore the cmd
+ * data buffer to its full size.
+ */
+static int qlt_restore_orig_sg(struct qla_tgt_cmd *cmd)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ WARN_ON(cmd->sg_mapped);
+
+ if (cmd->offset == 0) {
+ /* qlt_set_data_offset() has not been called. */
+ return 0;
+ }
+
+ if (se_cmd->t_data_sg == NULL ||
+ se_cmd->t_data_nents == 0 ||
+ se_cmd->data_length == 0) {
+ /* The original scatterlist is not available. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102c,
+ "qla_target(%d): tag %lld: cannot restore original cmd buffer; keep modified buffer at offset %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ return -ENOENT;
+ }
+
+ /* Restore the original scatterlist. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102d,
+ "qla_target(%d): tag %lld: restore original cmd buffer: offset %d -> 0\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ if (cmd->free_sg) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ cmd->offset = 0;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->bufflen = se_cmd->data_length;
+ return 0;
+}
+
+/*
+ * Adjust the data buffer of the given command to skip over offset bytes from
+ * the beginning while also reducing the length by offset bytes.
+ *
+ * This may be called multiple times for a single command if there are multiple
+ * SRRs, with each call reducing the buffer size further relative to the
+ * previous call. Note that the buffer may be reset back to its original size
+ * by calling qlt_restore_orig_sg().
+ */
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct scatterlist *sg_srr_start = NULL, *sg;
+ uint32_t first_offset = offset;
+ int sg_srr_cnt, i;
+ int bufflen = 0;
+
+ WARN_ON(cmd->sg_mapped);
+
+ ql_dbg(ql_dbg_tgt, vha, 0x11020,
+ "qla_target(%d): tag %lld: %s: sg %p sg_cnt %d dir %d cmd->offset %d cmd->bufflen %d add offset %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction, cmd->offset, cmd->bufflen,
+ offset);
+
+ if (cmd->se_cmd.prot_op != TARGET_PROT_NORMAL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11021,
+ "qla_target(%d): tag %lld: %s: SRR with protection information at nonzero offset not implemented\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11022,
+ "qla_target(%d): tag %lld: %s: Missing cmd->sg or zero cmd->sg_cnt\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11023,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if (first_offset < sg->length) {
+ sg_srr_start = sg;
+ break;
+ }
+ first_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11024,
+ "qla_target(%d): tag %lld: Unable to locate sg_srr_start for offset: %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, offset);
+ return -EINVAL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11025,
+ "qla_target(%d): tag %lld: prepare SRR sgl at sg index %d of %d byte offset %u of %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, i, cmd->sg_cnt,
+ first_offset, sg_srr_start->length);
+
+ sg_srr_cnt = cmd->sg_cnt - i;
+
+ if (first_offset == 0 && !cmd->free_sg) {
+ /*
+ * The offset points to the beginning of a scatterlist element.
+ * In this case there is no need to modify the first scatterlist
+ * element, so we can just point directly inside the original
+ * unmodified scatterlist.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x11026, "point directly to old sgl\n");
+ cmd->sg = sg_srr_start;
+ } else {
+ /*
+ * Allocate at most 2 new scatterlist elements to reduce memory
+ * requirements.
+ */
+ int n_alloc_sg = min(sg_srr_cnt, 2);
+ struct scatterlist *sg_srr =
+ kmalloc_array(n_alloc_sg, sizeof(*sg_srr), GFP_ATOMIC);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11027,
+ "qla_target(%d): tag %lld: Unable to allocate SRR scatterlist\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, n_alloc_sg);
+
+ /* Init the first sg element to skip over the unneeded data. */
+ sg_set_page(&sg_srr[0], sg_page(sg_srr_start),
+ sg_srr_start->length - first_offset,
+ sg_srr_start->offset + first_offset);
+ if (sg_srr_cnt == 1) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11028,
+ "single-element array\n");
+ } else if (sg_srr_cnt == 2) {
+ /* Only two elements; copy the last element. */
+ ql_dbg(ql_dbg_tgt, vha, 0x11029,
+ "complete two-element array\n");
+ sg = sg_next(sg_srr_start);
+ sg_set_page(&sg_srr[1], sg_page(sg), sg->length,
+ sg->offset);
+ } else {
+ /*
+ * Three or more elements; chain our newly-allocated
+ * 2-entry array to the rest of the original
+ * scatterlist at the splice point.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x1102a,
+ "chain to original scatterlist\n");
+ sg = sg_next(sg_srr_start);
+ sg_chain(sg_srr, 2, sg);
+ }
+
+ /*
+ * If the previous scatterlist was allocated here on a previous
+ * call, then it should be safe to free now.
+ */
+ if (cmd->free_sg)
+ qlt_free_sg(cmd);
+ cmd->sg = sg_srr;
+ cmd->free_sg = 1;
+ }
+
+ /* Note that sg_cnt doesn't include any extra chain elements. */
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->offset += offset;
+ cmd->bufflen -= offset;
+
+ /* Check the scatterlist length for consistency. */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ bufflen += sg->length;
+ }
+ if (bufflen != cmd->bufflen) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102b,
+ "qla_target(%d): tag %lld: %s: bad sgl length: expected %d got %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->bufflen, bufflen);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Given the "SRR relative offset" (offset of data to retry), determine what
+ * needs to be retransmitted (data and/or status) and return the mask in
+ * xmit_type. If retrying data, adjust the command buffer to point to only the
+ * data that need to be retried, skipping over the data that don't need to be
+ * retried.
+ *
+ * Returns 0 for success or a negative error number.
+ */
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ int res = 0, rel_offs;
+
+ if (srr_rel_offs < cmd->offset ||
+ srr_rel_offs > cmd->offset + cmd->bufflen) {
+ *xmit_type = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101e,
+ "qla_target(%d): tag %lld: srr_rel_offs %u outside accepted range %u - %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, srr_rel_offs,
+ cmd->offset, cmd->offset + cmd->bufflen);
+ return -EINVAL;
+ }
+
+ /*
+ * srr_rel_offs is the offset of the data we need from the beginning of
+ * the *original* buffer.
+ *
+ * cmd->offset is the offset of the current cmd scatterlist from the
+ * beginning of the *original* buffer, which might be nonzero if there
+ * was a previous SRR and the buffer could not be reset back to its
+ * original size.
+ *
+ * rel_offs is the offset of the data we need from the beginning of the
+ * current cmd scatterlist.
+ */
+ rel_offs = srr_rel_offs - cmd->offset;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101f,
+ "qla_target(%d): tag %lld: current buffer [%u - %u); srr_rel_offs=%d, rel_offs=%d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset,
+ cmd->offset + cmd->bufflen, srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
+/*
+ * Process a SRR (Sequence Retransmission Request) for a SCSI command once both
+ * the immediate notify SRR and CTIO SRR have been received from the hw.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr(struct scsi_qla_host *vha, struct qla_tgt_srr *srr)
+{
+ struct qla_tgt_cmd *cmd = srr->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct qla_qpair *qpair = cmd->qpair;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t op = cmd->cdb ? cmd->cdb[0] : 0;
+ uint32_t srr_rel_offs = le32_to_cpu(srr->imm_ntfy.u.isp24.srr_rel_offs);
+ uint16_t srr_ui = le16_to_cpu(srr->imm_ntfy.u.isp24.srr_ui);
+ int xmit_type = 0;
+ bool xmit_response = false;
+ bool rdy_to_xfer = false;
+ bool did_timeout;
+ bool send_term_exch = false;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ WARN_ON(cmd->cmd_sent_to_fw);
+
+ cmd->srr = NULL;
+
+ if (qlt_srr_is_chip_reset(vha, qpair, srr))
+ goto out_advance_cmd;
+
+ if (cmd->sent_term_exchg || cmd->sess->deleted || srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11010,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+
+ send_term_exch = true;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+
+ if (srr->reject)
+ goto out_reject;
+
+ /*
+ * If we receive multiple SRRs for the same command, place a time limit
+ * on how long we are willing to retry. This timeout should be less
+ * than SQA_MAX_HW_PENDING_TIME in scst_qla2xxx.c.
+ */
+ did_timeout = time_is_before_jiffies64((cmd->jiffies_at_hw_st_entry ? :
+ cmd->jiffies_at_alloc) + 30 * HZ);
+
+ qlt_restore_orig_sg(cmd);
+
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11011,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, op,
+ cmd->state);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11033,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11012,
+ "qla_target(%d): tag %lld, op %x: accept SRR_IU_STATUS and retransmit scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op,
+ se_cmd->scsi_status);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_IN:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11013,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_IN: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d, scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs,
+ se_cmd->scsi_status);
+
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11014,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_PROCESSED does not necessarily imply data-in
+ */
+ if (!qlt_has_data(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11015,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because cmd has no data to send\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11016,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11034,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_IN due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11017,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_IN and retransmit data: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_OUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11018,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_OUT: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs);
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11019,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_NEED_DATA implies there should be data-out
+ */
+ if (!qlt_has_data(cmd) || !cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101a,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11035,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_OUT due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ if (!(xmit_type & QLA_TGT_XMIT_DATA)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101b,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT: bad offset\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101c,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_OUT and receive data again: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ cmd->trc_flags |= TRC_SRR_XRDY;
+ rdy_to_xfer = true;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101d,
+ "qla_target(%d): tag %lld, op %x: reject unknown srr_ui value 0x%x: state=%d, bufflen=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, srr_ui, cmd->state,
+ cmd->bufflen, cmd->offset, srr_rel_offs);
+ goto out_reject;
+ }
+
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ if (xmit_response) {
+ /* For status and data-in, retransmit the response. */
+ if (qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ } else if (rdy_to_xfer) {
+ /* For data-out, receive data again. */
+ if (qlt_rdy_to_xfer(cmd)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+out_advance_cmd:
+ if (!cmd->sent_term_exchg &&
+ (send_term_exch || cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+ !qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ cmd->trc_flags |= TRC_SRR_TERM;
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ /*
+ * The initiator should abort the command, but if not, try to
+ * return an error.
+ */
+ cmd->srr_failed = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+ spin_unlock_irq(qpair->qp_lock_ptr);
+}
+
+/* Workqueue function for processing SRR work in process context. */
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11032,
+ "qla_target(%d): Entering SRR work\n", vha->vp_idx);
+
+ for (;;) {
+ struct qla_tgt_srr *srr;
+
+ spin_lock_irq(&tgt->srr_lock);
+ srr = list_first_entry_or_null(&tgt->srr_list, typeof(*srr),
+ srr_list_entry);
+ if (!srr) {
+ spin_unlock_irq(&tgt->srr_lock);
+ break;
+ }
+ list_del(&srr->srr_list_entry);
+ spin_unlock_irq(&tgt->srr_lock);
+
+ if (!srr->cmd) {
+ qlt_handle_srr_imm(vha, srr);
+ } else {
+ qlt_handle_srr(vha, srr);
+ vha->hw->tgt.tgt_ops->put_cmd_ref(srr->cmd);
+ kfree(srr);
+ }
+ }
+}
+
+/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
@@ -5325,6 +6527,12 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
if (qlt_24xx_handle_els(vha, iocb) == 0)
send_notify_ack = 0;
break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
"qla_target(%d): Received unknown immediate "
@@ -5359,7 +6567,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, atio, 1);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -5469,13 +6677,15 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
if (qfull) {
cmd->q_full = 1;
@@ -5588,7 +6798,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5697,7 +6907,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_hw_data *ha = vha->hw;
- mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
+ mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, TYPE_TGT_TMCMD, pkt);
if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
ql_dbg(ql_dbg_async, vha, 0xe064,
"qla_target(%d): ABTS Comp without mcmd\n",
@@ -5717,7 +6927,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
return;
}
qlt_24xx_retry_term_exchange(vha, rsp->qpair,
@@ -5728,10 +6938,10 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
vha->vp_idx, entry->compl_status,
entry->error_subcode1,
entry->error_subcode2);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
} else if (mcmd) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
}
@@ -5795,7 +7005,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5816,26 +7026,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
}
break;
- case CONTINUE_TGT_IO_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
- case CTIO_A64_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
case IMMED_NOTIFY_TYPE:
ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
@@ -6323,6 +7513,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
base_vha->vha_tgt.qla_tgt = tgt;
@@ -6705,7 +7898,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
- ha_locked, 0);
+ ha_locked);
} else {
qlt_24xx_atio_pkt_all_vps(vha,
(struct atio_from_isp *)pkt, ha_locked);
@@ -6971,6 +8164,32 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+/* Update any settings that depend on ha->fw_*_version. */
+void
+qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ql2xtgt_tape_enable && qlt_has_sler_fw_bug(ha)) {
+ ql_log(ql_log_warn, vha, 0x11036,
+ "WARNING: ignoring ql2xtgt_tape_enable due to buggy HBA firmware; please upgrade FW\n");
+
+ /* Disable FC Tape support */
+ if (ha->isp_ops->nvram_config == qla81xx_nvram_config) {
+ struct init_cb_81xx *icb =
+ (struct init_cb_81xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ } else {
+ struct init_cb_24xx *icb =
+ (struct init_cb_24xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ }
+ }
+}
+
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 15a59c125c53..61072fb41b29 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -184,6 +184,7 @@ struct nack_to_isp {
#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID 0x17
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
#define NOTIFY_ACK_SUCCESS 0x01
@@ -686,6 +687,8 @@ struct qla_tgt_func_tmpl {
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ int (*get_cmd_ref)(struct qla_tgt_cmd *cmd);
+ void (*put_cmd_ref)(struct qla_tgt_cmd *cmd);
void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -754,6 +757,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_DONE 4 /* cmd being freed */
/* ATIO task_codes field */
#define ATIO_SIMPLE_QUEUE 0
@@ -822,18 +826,26 @@ struct qla_tgt {
int notify_ack_expected;
int abts_resp_expected;
int modify_lun_expected;
+
+ spinlock_t srr_lock;
+ struct list_head srr_list;
+ struct work_struct srr_work;
+
atomic_t tgt_global_resets_count;
+
struct list_head tgt_list_entry;
};
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
uint32_t chip_reset;
- struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
bool aborted;
struct rsp_que *rsp;
+
+ struct atio_from_isp atio;
+ /* DO NOT ADD ANYTHING ELSE HERE - atio must be last member */
};
enum trace_flags {
@@ -858,6 +870,7 @@ enum trace_flags {
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
TRC_DIF_ERR = BIT_20,
+ TRC_SRR_IMM = BIT_21,
};
struct qla_tgt_cmd {
@@ -876,25 +889,36 @@ struct qla_tgt_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
- spinlock_t cmd_lock;
- /* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
+
+ /* Call qlt_free_sg() if set. */
+ unsigned int free_sg:1;
+
unsigned int write_data_transferred:1;
+
+ /* Set if the SCSI status was sent successfully. */
+ unsigned int rsp_sent:1;
+
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int edif:1;
+ /* Set if a SRR was rejected. */
+ unsigned int srr_failed:1;
+
+ /* Set if the exchange has been terminated. */
+ unsigned int sent_term_exchg:1;
+
/*
- * This variable may be set from outside the LIO and I/O completion
- * callback functions. Do not declare this member variable as a
- * bitfield to avoid a read-modify-write operation when this variable
- * is set.
+ * Set if sent_term_exchg is set, or if the cmd was aborted by a TMR,
+ * or if some other error prevents normal processing of the command.
*/
- unsigned int aborted;
+ unsigned int aborted:1;
+ struct qla_tgt_srr *srr;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
@@ -925,13 +949,23 @@ struct qla_tgt_cmd {
uint8_t scsi_status, sense_key, asc, ascq;
struct crc_context *ctx;
- const uint8_t *cdb;
+ uint8_t *cdb;
uint64_t lba;
+ int cdb_len;
uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
uint32_t a_ref_tag, e_ref_tag;
#define DIF_BUNDL_DMA_VALID 1
uint16_t prot_flags;
+ unsigned long jiffies_at_term_exchg;
+
+ /*
+ * jiffies64 when qlt_rdy_to_xfer() or qlt_xmit_response() first
+ * called, or 0 when not in those states. Used to limit the number of
+ * SRR retries.
+ */
+ uint64_t jiffies_at_hw_st_entry;
+
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -965,6 +999,7 @@ struct qla_tgt_mgmt_cmd {
unsigned int flags;
#define QLA24XX_MGMT_SEND_NACK BIT_0
#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
+#define QLA24XX_MGMT_LLD_OWNED BIT_2
uint32_t reset_count;
struct work_struct work;
uint64_t unpacked_lun;
@@ -993,6 +1028,45 @@ struct qla_tgt_prm {
uint16_t tot_dsds;
};
+/*
+ * SRR (Sequence Retransmission Request) - resend or re-receive some or all
+ * data or status to recover from a transient I/O error.
+ */
+struct qla_tgt_srr {
+ /*
+ * Copy of immediate notify SRR message received from hw; valid only if
+ * imm_ntfy_recvd is true.
+ */
+ struct imm_ntfy_from_isp imm_ntfy;
+
+ struct list_head srr_list_entry;
+
+ /* The command affected by this SRR, or NULL if not yet determined. */
+ struct qla_tgt_cmd *cmd;
+
+ /* Used to detect if the HBA has been reset since receiving the SRR. */
+ uint32_t reset_count;
+
+ /*
+ * The hardware sends two messages for each SRR - an immediate notify
+ * and a CTIO with CTIO_SRR_RECEIVED status. These keep track of which
+ * messages have been received. The SRR can be processed once both of
+ * these are true.
+ */
+ bool imm_ntfy_recvd;
+ bool ctio_recvd;
+
+ /*
+ * This is set to true if the affected command was aborted (cmd may be
+ * set to NULL), in which case the immediate notify exchange also needs
+ * to be aborted.
+ */
+ bool aborted;
+
+ /* This is set to true to force the SRR to be rejected. */
+ bool reject;
+};
+
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
@@ -1048,6 +1122,20 @@ static inline uint32_t sid_to_key(const be_id_t s_id)
}
/*
+ * Free the scatterlist allocated by qlt_set_data_offset(). Call this only if
+ * cmd->free_sg is set.
+ */
+static inline void qlt_free_sg(struct qla_tgt_cmd *cmd)
+{
+ /*
+ * The scatterlist may be chained to the original scatterlist, but we
+ * only need to free the first segment here since that is the only part
+ * allocated by qlt_set_data_offset().
+ */
+ kfree(cmd->sg);
+}
+
+/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
@@ -1055,9 +1143,14 @@ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_abort_cmd(struct qla_tgt_cmd *);
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject);
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
@@ -1073,6 +1166,7 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
+void qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index ceaf1c7b1d17..2fff68935338 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -291,6 +291,16 @@ static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
return cmd;
}
+static int tcm_qla2xxx_get_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ return target_get_sess_cmd(&cmd->se_cmd, true);
+}
+
+static void tcm_qla2xxx_put_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ target_put_sess_cmd(&cmd->se_cmd);
+}
+
static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
{
target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
@@ -303,6 +313,8 @@ static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->state = QLA_TGT_STATE_DONE;
+
cmd->qpair->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
@@ -529,6 +541,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
+ else if (cmd->srr_failed)
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_SNACK_REJECTED);
else
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
@@ -1524,6 +1539,8 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.get_cmd = tcm_qla2xxx_get_cmd,
+ .get_cmd_ref = tcm_qla2xxx_get_cmd_ref,
+ .put_cmd_ref = tcm_qla2xxx_put_cmd_ref,
.rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 75125d2021f5..7febc0baa9d6 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1016,7 +1016,7 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
uint32_t crash_record_size = 0;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
@@ -1099,7 +1099,7 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
/* Get Crash Record. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 83ff66f954e6..97329c97332f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -9796,11 +9796,6 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
*/
pci_restore_state(pdev);
- /* pci_restore_state() clears the saved_state flag of the device
- * save restored state which resets saved_state flag
- */
- pci_save_state(pdev);
-
/* Initialize device or resume if in suspended state */
rc = pci_enable_device(pdev);
if (rc) {
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 9a0f467264b3..76cdad063f7b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -216,6 +216,9 @@ int scsi_device_max_queue_depth(struct scsi_device *sdev)
*/
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return -EINVAL;
+
depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
if (depth > 0) {
@@ -255,6 +258,8 @@ EXPORT_SYMBOL(scsi_change_queue_depth);
*/
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return 0;
/*
* Don't let QUEUE_FULLs on the same
@@ -826,8 +831,11 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
spin_lock_irqsave(shost->host_lock, flags);
while (list->next != &shost->__devices) {
next = list_entry(list->next, struct scsi_device, siblings);
- /* skip devices that we can't get a reference to */
- if (!scsi_device_get(next))
+ /*
+ * Skip pseudo devices and also devices we can't get a
+ * reference to.
+ */
+ if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
break;
next = NULL;
list = list->next;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b2ab97be5db3..1f2a53ba5dd9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -230,6 +230,7 @@ struct tape_block {
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
+#define SDEBUG_OPT_UNALIGNED_WRITE 0x20000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
@@ -237,7 +238,8 @@ struct tape_block {
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \
- SDEBUG_OPT_CMD_ABORT)
+ SDEBUG_OPT_CMD_ABORT | \
+ SDEBUG_OPT_UNALIGNED_WRITE)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
@@ -2961,11 +2963,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
- arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+
if (!arr)
return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
@@ -4932,6 +4934,14 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
+ atomic_read(&sdeb_inject_pending))) {
+ atomic_set(&sdeb_inject_pending, 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -6752,20 +6762,59 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
return false;
}
+struct sdebug_abort_cmd {
+ u32 unique_tag;
+};
+
+enum sdebug_internal_cmd_type {
+ SCSI_DEBUG_ABORT_CMD,
+};
+
+struct sdebug_internal_cmd {
+ enum sdebug_internal_cmd_type type;
+
+ union {
+ struct sdebug_abort_cmd abort_cmd;
+ };
+};
+
+union sdebug_priv {
+ struct sdebug_scsi_cmd cmd;
+ struct sdebug_internal_cmd internal_cmd;
+};
+
/*
- * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
+ * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
+ * command is allocated and submitted to trigger the reserved command
+ * infrastructure.
*/
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(&sdsc->lock, flags);
- res = scsi_debug_stop_cmnd(cmnd);
- spin_unlock_irqrestore(&sdsc->lock, flags);
-
- return res;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ struct sdebug_internal_cmd *internal_cmd;
+ struct scsi_cmnd *abort_cmd;
+ struct request *abort_rq;
+ blk_status_t res;
+
+ abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+ BLK_MQ_REQ_RESERVED);
+ if (!abort_cmd)
+ return false;
+ internal_cmd = scsi_cmd_priv(abort_cmd);
+ *internal_cmd = (struct sdebug_internal_cmd) {
+ .type = SCSI_DEBUG_ABORT_CMD,
+ .abort_cmd = {
+ .unique_tag = unique_tag,
+ },
+ };
+ abort_rq = scsi_cmd_to_rq(abort_cmd);
+ abort_rq->timeout = secs_to_jiffies(3);
+ res = blk_execute_rq(abort_rq, true);
+ scsi_put_internal_cmd(abort_cmd);
+ return res == BLK_STS_OK;
}
/*
@@ -9220,6 +9269,56 @@ out_handle:
return ret;
}
+/* Process @scp, a request to abort a SCSI command by tag. */
+static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+ struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
+ const u32 unique_tag = abort_cmd->unique_tag;
+ struct scsi_cmnd *to_be_aborted_scmd =
+ scsi_host_find_tag(shost, unique_tag);
+ struct sdebug_scsi_cmd *to_be_aborted_sdsc =
+ scsi_cmd_priv(to_be_aborted_scmd);
+ bool res = false;
+
+ if (!to_be_aborted_scmd) {
+ pr_err("%s: command with tag %#x not found\n", __func__,
+ unique_tag);
+ return;
+ }
+
+ scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
+ res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
+
+ if (res)
+ pr_info("%s: aborted command with tag %#x\n",
+ __func__, unique_tag);
+ else
+ pr_err("%s: failed to abort command with tag %#x\n",
+ __func__, unique_tag);
+
+ set_host_byte(scp, res ? DID_OK : DID_ERROR);
+}
+
+static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+
+ switch (internal_cmd->type) {
+ case SCSI_DEBUG_ABORT_CMD:
+ scsi_debug_abort_cmd(shost, scp);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ set_host_byte(scp, DID_ERROR);
+ break;
+ }
+
+ scsi_done(scp);
+ return 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -9420,6 +9519,9 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
+ return 0;
+
spin_lock_init(&sdsc->lock);
hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -9439,6 +9541,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
+ .queue_reserved_command = scsi_debug_process_reserved_command,
.change_queue_depth = sdebug_change_qdepth,
.map_queues = sdebug_map_queues,
.mq_poll = sdebug_blk_mq_poll,
@@ -9448,6 +9551,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_host_reset_handler = scsi_debug_host_reset,
.can_queue = SDEBUG_CANQUEUE,
+ .nr_reserved_cmds = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.cmd_per_lun = DEF_CMD_PER_LUN,
@@ -9456,7 +9560,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.module = THIS_MODULE,
.skip_settle_delay = 1,
.track_queue_depth = 1,
- .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .cmd_size = sizeof(union sdebug_priv),
.init_cmd_priv = sdebug_init_cmd_priv,
.target_alloc = sdebug_target_alloc,
.target_destroy = sdebug_target_destroy,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c13812a3f03..f869108fd969 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -749,6 +749,9 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
const struct scsi_host_template *sht = sdev->host->hostt;
struct scsi_device *tmp_sdev;
+ if (!sdev->budget_map.map)
+ return;
+
if (!sht->track_queue_depth ||
sdev->queue_depth >= sdev->max_queue_depth)
return;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7e42293b864..51ad2ad07e43 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -396,7 +396,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
- sbitmap_put(&sdev->budget_map, cmd->budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, cmd->budget_token);
cmd->budget_token = -1;
}
@@ -1360,6 +1361,9 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
{
int token;
+ if (!sdev->budget_map.map)
+ return INT_MAX;
+
token = sbitmap_get(&sdev->budget_map);
if (token < 0)
return -1;
@@ -1530,6 +1534,14 @@ static void scsi_complete(struct request *rq)
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
enum scsi_disposition disposition;
+ if (blk_mq_is_reserved_rq(rq)) {
+ /* Only pass-through requests are supported in this code path. */
+ WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)));
+ scsi_mq_uninit_cmd(cmd);
+ __blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
+ return;
+ }
+
INIT_LIST_HEAD(&cmd->eh_entry);
atomic_inc(&cmd->device->iodone_cnt);
@@ -1749,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
- sbitmap_put(&sdev->budget_map, budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, budget_token);
}
/*
@@ -1818,25 +1831,31 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(cmd->budget_token < 0);
/*
- * If the device is not in running state we will reject some or all
- * commands.
+ * Bypass the SCSI device, SCSI target and SCSI host checks for
+ * reserved commands.
*/
- if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_device_state_check(sdev, req);
- if (ret != BLK_STS_OK)
- goto out_put_budget;
- }
+ if (!blk_mq_is_reserved_rq(req)) {
+ /*
+ * If the device is not in running state we will reject some or
+ * all commands.
+ */
+ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+ ret = scsi_device_state_check(sdev, req);
+ if (ret != BLK_STS_OK)
+ goto out_put_budget;
+ }
- ret = BLK_STS_RESOURCE;
- if (!scsi_target_queue_ready(shost, sdev))
- goto out_put_budget;
- if (unlikely(scsi_host_in_recovery(shost))) {
- if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
- ret = BLK_STS_OFFLINE;
- goto out_dec_target_busy;
+ ret = BLK_STS_RESOURCE;
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto out_put_budget;
+ if (unlikely(scsi_host_in_recovery(shost))) {
+ if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
+ ret = BLK_STS_OFFLINE;
+ goto out_dec_target_busy;
+ }
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+ goto out_dec_target_busy;
}
- if (!scsi_host_queue_ready(q, shost, sdev, cmd))
- goto out_dec_target_busy;
/*
* Only clear the driver-private command data if the LLD does not supply
@@ -1865,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
blk_mq_start_request(req);
+ if (blk_mq_is_reserved_rq(req)) {
+ reason = shost->hostt->queue_reserved_command(shost, cmd);
+ if (reason) {
+ ret = BLK_STS_RESOURCE;
+ goto out_put_budget;
+ }
+ return BLK_STS_OK;
+ }
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -2083,7 +2110,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->ops = &scsi_mq_ops_no_commit;
tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
tag_set->nr_maps = shost->nr_maps ? : 1;
- tag_set->queue_depth = shost->can_queue;
+ tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds;
+ tag_set->reserved_tags = shost->nr_reserved_cmds;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
if (shost->hostt->tag_alloc_policy_rr)
@@ -2107,6 +2135,44 @@ void scsi_mq_free_tags(struct kref *kref)
}
/**
+ * scsi_get_internal_cmd() - Allocate an internal SCSI command.
+ * @sdev: SCSI device from which to allocate the command
+ * @data_direction: Data direction for the allocated command
+ * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or
+ * BLK_MQ_REQ_NOWAIT.
+ *
+ * Allocates a SCSI command for internal LLDD use.
+ */
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+ enum dma_data_direction data_direction,
+ blk_mq_req_flags_t flags)
+{
+ enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT :
+ REQ_OP_DRV_IN;
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+
+ rq = scsi_alloc_request(sdev->request_queue, op, flags);
+ if (IS_ERR(rq))
+ return NULL;
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->device = sdev;
+
+ return scmd;
+}
+EXPORT_SYMBOL_GPL(scsi_get_internal_cmd);
+
+/**
+ * scsi_put_internal_cmd() - Free an internal SCSI command.
+ * @scmd: SCSI command to be freed
+ */
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd)
+{
+ blk_mq_free_request(blk_mq_rq_from_pdu(scmd));
+}
+EXPORT_SYMBOL_GPL(scsi_put_internal_cmd);
+
+/**
* scsi_device_from_queue - return sdev associated with a request_queue
* @q: The request queue to return the sdev from
*
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index b02af340c2d3..3cd0d3074085 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -26,9 +26,9 @@ static void scsi_log_release_buffer(char *bufptr)
kfree(bufptr);
}
-static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+static inline const char *scmd_name(struct scsi_cmnd *scmd)
{
- struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+ const struct request *rq = scsi_cmd_to_rq(scmd);
if (!rq->q || !rq->q->disk)
return NULL;
@@ -80,8 +80,8 @@ void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
}
EXPORT_SYMBOL(sdev_prefix_printk);
-void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
- const char *fmt, ...)
+void scmd_printk(const char *level, struct scsi_cmnd *scmd, const char *fmt,
+ ...)
{
va_list args;
char *logbuf;
@@ -94,7 +94,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
+ scsi_cmd_to_rq(scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -371,16 +371,15 @@ void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
+void scsi_print_sense(struct scsi_cmnd *cmd)
{
scsi_log_print_sense(cmd->device, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
- cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd_to_rq(cmd)->tag, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
-void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
- int disposition)
+void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
{
char *logbuf;
size_t off, logbuf_len;
@@ -393,7 +392,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d581613d87c7..2652fecbfe47 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -205,7 +205,6 @@ static int scsi_runtime_idle(struct device *dev)
/* Insert hooks here for targets, hosts, and transport classes */
if (scsi_is_sdev_device(dev)) {
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return -EBUSY;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5b2b19f5e8ec..d07ec15d6c00 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -135,6 +135,7 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3c6e089e80c3..7acbfcfc2172 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -347,6 +347,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
+ scsi_sysfs_device_initialize(sdev);
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return sdev;
+
depth = sdev->host->cmd_per_lun ?: 1;
/*
@@ -363,8 +368,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_change_queue_depth(sdev, depth);
- scsi_sysfs_device_initialize(sdev);
-
if (shost->hostt->sdev_init) {
ret = shost->hostt->sdev_init(sdev);
if (ret) {
@@ -1068,6 +1071,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
+ sdev->sdev_bflags = *bflags;
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return SCSI_SCAN_LUN_PRESENT;
+
/*
* No need to freeze the queue as it isn't reachable to anyone else yet.
*/
@@ -1113,7 +1121,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->max_queue_depth = sdev->queue_depth;
WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
- sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
@@ -1212,6 +1219,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
+ if (scsi_device_is_pseudo_dev(sdev)) {
+ if (bflagsp)
+ *bflagsp = BLIST_NOLUN;
+ return SCSI_SCAN_LUN_PRESENT;
+ }
+
result = kmalloc(result_len, GFP_KERNEL);
if (!result)
goto out_free_sdev;
@@ -2083,12 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
- if (sdev->sdev_state == SDEV_DEL)
+ if (scsi_device_is_pseudo_dev(sdev) ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_device(sdev);
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Remove the pseudo device last since it may be needed during removal
+ * of other SCSI devices.
+ */
+ if (shost->pseudo_sdev)
+ __scsi_remove_device(shost->pseudo_sdev);
}
+/**
+ * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
+ * @shost: Host that needs a pseudo SCSI device
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: The scsi_device or NULL
+ *
+ * Notes:
+ * Attach a single scsi_device to the Scsi_Host. The primary aim for this
+ * device is to serve as a container from which SCSI commands can be
+ * allocated. Each SCSI command will carry a command tag allocated by the
+ * block layer. These SCSI commands can be used by the LLDD to send
+ * internal or passthrough commands without having to manage tag allocation
+ * inside the LLDD.
+ */
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev = NULL;
+ struct scsi_target *starget;
+
+ guard(mutex)(&shost->scan_mutex);
+
+ if (!scsi_host_scan_allowed(shost))
+ goto out;
+
+ starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
+ if (!starget)
+ goto out;
+
+ sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
+ if (!sdev) {
+ scsi_target_reap(starget);
+ goto put_target;
+ }
+
+ sdev->borken = 0;
+
+put_target:
+ /* See also the get_device(dev) call in scsi_alloc_target(). */
+ put_device(&starget->dev);
+
+out:
+ return sdev;
+}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15ba493d2138..99eb0a30df61 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -605,68 +605,6 @@ sdev_show_##field (struct device *dev, struct device_attribute *attr, \
sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
-
-/*
- * sdev_rw_attr: create a function and attribute variable for a
- * read/write field.
- */
-#define sdev_rw_attr(field, format_string) \
- sdev_show_function(field, format_string) \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct scsi_device *sdev; \
- sdev = to_scsi_device(dev); \
- sscanf (buf, format_string, &sdev->field); \
- return count; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/* Currently we don't export bit fields, but we might in future,
- * so leave this code in */
-#if 0
-/*
- * sdev_rd_attr: create a function and attribute variable for a
- * read/write bit field.
- */
-#define sdev_rw_attr_bit(field) \
- sdev_show_function(field, "%d\n") \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- int ret; \
- struct scsi_device *sdev; \
- ret = scsi_sdev_check_buf_bit(buf); \
- if (ret >= 0) { \
- sdev = to_scsi_device(dev); \
- sdev->field = ret; \
- ret = count; \
- } \
- return ret; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/*
- * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
- * else return -EINVAL.
- */
-static int scsi_sdev_check_buf_bit(const char *buf)
-{
- if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
- if (buf[0] == '1')
- return 1;
- else if (buf[0] == '0')
- return 0;
- else
- return -EINVAL;
- } else
- return -EINVAL;
-}
-#endif
/*
* Create the actual show/store functions and data structures.
*/
@@ -710,10 +648,14 @@ static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct scsi_device *sdev;
- int timeout;
- sdev = to_scsi_device(dev);
- sscanf (buf, "%d\n", &timeout);
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int ret, timeout;
+
+ ret = kstrtoint(buf, 0, &timeout);
+ if (ret)
+ return ret;
+ if (timeout <= 0)
+ return -EINVAL;
blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
@@ -1406,6 +1348,9 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
int error;
struct scsi_target *starget = sdev->sdev_target;
+ if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev)))
+ return -EINVAL;
+
error = scsi_target_add(starget);
if (error)
return error;
@@ -1513,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->sdev_destroy)
+ if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy)
sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3a821afee9bc..987befb02408 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -441,7 +441,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
+ fc_host->work_q = alloc_workqueue("fc_wq_%d", WQ_PERCPU, 0,
+ shost->host_no);
if (!fc_host->work_q)
return -ENOMEM;
@@ -3088,7 +3089,7 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
- rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0,
+ rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", WQ_PERCPU, 0,
shost->host_no, rport->number);
if (!rport->devloss_work_q) {
printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 743b4c792ceb..ed21c032bbc4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3961,7 +3961,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(system_unbound_wq, &session->destroy_work);
+ queue_work(system_dfl_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0252d3f6bed1..f2c0744b4480 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -318,6 +318,35 @@ static ssize_t manage_shutdown_store(struct device *dev,
}
static DEVICE_ATTR_RW(manage_shutdown);
+static ssize_t manage_restart_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_restart);
+}
+
+static ssize_t manage_restart_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_restart = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_restart);
+
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -654,6 +683,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr,
&dev_attr_manage_shutdown.attr,
+ &dev_attr_manage_restart.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -4177,7 +4207,9 @@ static void sd_shutdown(struct device *dev)
(system_state == SYSTEM_POWER_OFF &&
sdkp->device->manage_shutdown) ||
(system_state == SYSTEM_RUNNING &&
- sdkp->device->manage_runtime_start_stop)) {
+ sdkp->device->manage_runtime_start_stop) ||
+ (system_state == SYSTEM_RESTART &&
+ sdkp->device->manage_restart)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 36382eca941c..574af8243016 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -240,7 +240,7 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones, struct blk_report_zones_args *args);
#else /* CONFIG_BLK_DEV_ZONED */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a8db66428f80..56e455fb5add 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -35,8 +35,7 @@ static bool sd_zbc_is_gap_zone(const u8 buf[64])
* @buf: SCSI zone descriptor.
* @idx: Index of the zone relative to the first zone reported by the current
* sd_zbc_report_zones() call.
- * @cb: Callback function pointer.
- * @data: Second argument passed to @cb.
+ * @args: report zones arguments (callback, etc)
*
* Return: Value returned by @cb.
*
@@ -44,12 +43,11 @@ static bool sd_zbc_is_gap_zone(const u8 buf[64])
* call @cb(blk_zone, @data).
*/
static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
- unsigned int idx, report_zones_cb cb, void *data)
+ unsigned int idx, struct blk_report_zones_args *args)
{
struct scsi_device *sdp = sdkp->device;
struct blk_zone zone = { 0 };
sector_t start_lba, gran;
- int ret;
if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
return -EINVAL;
@@ -87,11 +85,7 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
else
zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- ret = cb(&zone, idx, data);
- if (ret)
- return ret;
-
- return 0;
+ return disk_report_zone(sdkp->disk, &zone, idx, args);
}
/**
@@ -217,14 +211,14 @@ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
* @disk: Disk to report zones for.
* @sector: Start sector.
* @nr_zones: Maximum number of zones to report.
- * @cb: Callback function called to report zone information.
- * @data: Second argument passed to @cb.
+ * @args: Callback arguments.
*
* Called by the block layer to iterate over zone information. See also the
* disk->fops->report_zones() calls in block/blk-zoned.c.
*/
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args)
{
struct scsi_disk *sdkp = scsi_disk(disk);
sector_t lba = sectors_to_logical(sdkp->device, sector);
@@ -283,7 +277,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
}
ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
- cb, data);
+ args);
if (ret)
goto out;
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index e519df68d603..70c75ab1453a 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -133,6 +133,7 @@ static int sim710_probe_common(struct device *dev, unsigned long base_addr,
out_put_host:
scsi_host_put(host);
out_release:
+ ioport_unmap(hostdata->base);
release_region(base_addr, 64);
out_free:
kfree(hostdata);
@@ -148,6 +149,7 @@ static int sim710_device_remove(struct device *dev)
scsi_remove_host(host);
NCR_700_release(host);
+ ioport_unmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
release_region(host->base, 64);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 03c97e60d36f..fe549e2b7c94 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -34,11 +34,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.34-035"
+#define DRIVER_VERSION "2.1.36-026"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 34
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 36
+#define DRIVER_REVISION 26
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5555,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
+/*
+ * Adjust the timeout value sent to the firmware for physical devices by
+ * subtracting 3 seconds from any timeout greater than or equal to 8 seconds.
+ *
+ * This gives the firmware a window in which to attempt early recovery
+ * before the OS-level timeout fires.
+ */
+#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
+
static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
+ u32 timeout;
size_t cdb_length;
struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ struct request *rq;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5634,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (device->is_physical_device) {
+ rq = scsi_cmd_to_rq(scmd);
+ timeout = rq->timeout / HZ;
+ put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
+ }
+
pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
return 0;
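
For concreteness, the macro's behavior at a few representative timeouts (in seconds, as computed from rq->timeout / HZ above), written as compile-time checks; these assume <linux/build_bug.h> for static_assert():

#include <linux/build_bug.h>

static_assert(ADJUST_SECS_TIMEOUT_VALUE(30) == 27);     /* common disk timeout */
static_assert(ADJUST_SECS_TIMEOUT_VALUE(8) == 5);       /* boundary: still adjusted */
static_assert(ADJUST_SECS_TIMEOUT_VALUE(7) == 7);       /* short timeouts passed through */
static_assert(ADJUST_SECS_TIMEOUT_VALUE(0) == 0);       /* 0 = no timeout set */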
@@ -6410,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev
static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
+ unsigned long flags;
int rc;
mutex_lock(&ctrl_info->lun_reset_mutex);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
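
The lookup added above closes a race in which a LUN reset could be issued against a device that hot-removal has already torn down. Condensed into a sketch (the wrapper name is hypothetical; the lock, helper, and device fields are the ones used in the hunk):

static bool device_still_present(struct pqi_ctrl_info *ctrl_info,
                                 struct pqi_scsi_dev *device)
{
        unsigned long flags;
        bool present;

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
        present = pqi_find_scsi_dev(ctrl_info, device->bus, device->target,
                                    device->lun) != NULL;
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        /* The caller holds lun_reset_mutex, which keeps this answer
         * stable: pqi_sdev_destroy() frees devices only under the
         * same mutex (see the next hunk). */
        return present;
}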
@@ -6594,7 +6623,9 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
int mutex_acquired;
+ unsigned int lun;
unsigned long flags;
ctrl_info = shost_to_hba(sdev->host);
@@ -6621,8 +6652,13 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
mutex_unlock(&ctrl_info->scan_mutex);
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ cancel_work_sync(&tmf_work->work_struct);
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
pqi_dev_info(ctrl_info, "removed", device);
pqi_free_device(device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
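
The destroy-side half of the same fix: pqi_sdev_destroy() now (1) waits for all per-LUN TMF work via cancel_work_sync() and (2) frees the device only while holding lun_reset_mutex, serializing against pqi_device_reset_handler(). Compressed sketch of that ordering (an indexed loop equivalent to the pointer walk above):

/* 1. No TMF work may still be queued or running for this device. */
for (lun = 0; lun < PQI_MAX_LUNS_PER_DEVICE; lun++)
        cancel_work_sync(&device->tmf_work[lun].work_struct);

/* 2. The free cannot overlap a reset, which holds the same mutex. */
mutex_lock(&ctrl_info->lun_reset_mutex);
pqi_free_device(device);
mutex_unlock(&ctrl_info->lun_reset_mutex);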
@@ -8936,7 +8972,8 @@ static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ host_memory_descriptor->host_chunk_virt_address =
+ kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
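
kmalloc_array() is behaviorally identical to the open-coded form except on multiplication overflow, where it returns NULL rather than allocating a silently truncated buffer; the sg_count bound checked just above already rules overflow out here, so this is defensive hardening. As a sketch, with a hypothetical helper name:

static void **alloc_chunk_table(size_t sg_count)
{
        /* Returns NULL if sg_count * sizeof(void *) would overflow,
         * unlike kmalloc(sg_count * sizeof(void *), GFP_KERNEL). */
        return kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
}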
@@ -10110,6 +10147,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4840)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 74a6830b7ed8..168f25e4aaa3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3526,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size)
out:
return result;
}
-
+/*
+ * Handles any extra state needed for ioctls that are not st-specific.
+ * Called with the scsi_tape lock held; the lock is released before return.
+ */
+static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm,
+ struct file *file, unsigned int cmd_in,
+ unsigned long arg)
+{
+ int i, retval = 0;
+
+ if (!STm->defined) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ switch (cmd_in) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_GET_PCI:
+ break;
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO)) {
+ retval = -EPERM;
+ goto out;
+ }
+ fallthrough;
+ default:
+ if ((i = flush_buffer(STp, 0)) < 0) {
+ retval = i;
+ goto out;
+ } else { /* flush_buffer succeeds */
+ if (STp->can_partitions) {
+ i = switch_partition(STp);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
+ }
+ }
+ mutex_unlock(&STp->lock);
+
+ retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE,
+ cmd_in, (void __user *)arg);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+
+ return retval;
+out:
+ mutex_unlock(&STp->lock);
+ return retval;
+}
/* The ioctl command */
static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
@@ -3565,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (retval)
goto out;
+ switch (cmd_in) {
+ case MTIOCPOS:
+ case MTIOCGET:
+ case MTIOCTOP:
+ break;
+ default:
+ return st_common_ioctl(STp, STm, file, cmd_in, arg);
+ }
+
cmd_type = _IOC_TYPE(cmd_in);
cmd_nr = _IOC_NR(cmd_in);
@@ -3876,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
mt_pos.mt_blkno = blk;
retval = put_user_mtpos(p, &mt_pos);
- goto out;
- }
- mutex_unlock(&STp->lock);
-
- switch (cmd_in) {
- case SG_IO:
- case SCSI_IOCTL_SEND_COMMAND:
- case CDROM_SEND_PACKET:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- break;
- default:
- break;
}
-
- retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
- if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
- /* unload */
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
-
out:
mutex_unlock(&STp->lock);
return retval;
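
Net effect of the st.c refactor: st_ioctl() keeps only the three st-specific commands inline and routes everything else through st_common_ioctl(), which now performs the CAP_SYS_RAWIO check and, for most commands, the buffer flush and partition switch in one place before forwarding to scsi_ioctl(). The dispatch, condensed into a sketch:

switch (cmd_in) {
case MTIOCPOS:
case MTIOCGET:
case MTIOCTOP:
        break;  /* st-specific: handled below, STp->lock still held */
default:
        /* Enters with STp->lock held; st_common_ioctl() drops the
         * lock on every path before returning. */
        return st_common_ioctl(STp, STm, file, cmd_in, arg);
}

The locking contract is the subtle part: scsi_ioctl() may sleep, so the helper releases STp->lock before the forwarded call on the success path and in its error exit.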
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6357bc301cb..93c223e0a777 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1844,6 +1844,7 @@ out_release_regions:
out_scsi_host_put:
scsi_host_put(host);
out_disable:
+ unregister_reboot_notifier(&stex_notifier);
pci_disable_device(pdev);
return err;
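
The stex fix adds the matching unregister_reboot_notifier() to the probe error path, so a failed probe no longer leaves a reboot notifier registered for a device that was never set up. Generic shape of the pairing, sketched with illustrative labels and a hypothetical later_setup():

static int later_setup(struct pci_dev *pdev);   /* hypothetical */

static int probe_sketch(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = register_reboot_notifier(&stex_notifier);
        if (err)
                goto out_disable;

        err = later_setup(pdev);
        if (err)
                goto out_unregister;
        return 0;

out_unregister:
        unregister_reboot_notifier(&stex_notifier);     /* the added unwind */
out_disable:
        pci_disable_device(pdev);
        return err;
}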