Diffstat (limited to 'drivers/crypto/hisilicon/qm.c')
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a5b96adf2d1e..be25ecbdba69 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -64,10 +64,10 @@
 #define QM_EQE_AEQE_SIZE	(2UL << 12)
 #define QM_EQC_PHASE_SHIFT	16
-#define QM_EQE_PHASE(eqe)	((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
+#define QM_EQE_PHASE(dw0)	(((dw0) >> 16) & 0x1)
 #define QM_EQE_CQN_MASK		GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe)	((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
+#define QM_AEQE_PHASE(dw0)	(((dw0) >> 16) & 0x1)
 #define QM_AEQE_TYPE_SHIFT	17
 #define QM_AEQE_TYPE_MASK	0xf
 #define QM_AEQE_CQN_MASK	GENMASK(15, 0)
@@ -976,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 {
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
 	struct hisi_qm_poll_data *poll_data = NULL;
+	u32 dw0 = le32_to_cpu(eqe->dw0);
 	u16 eq_depth = qm->eq_depth;
 	u16 cqn, eqe_num = 0;
 
-	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+	if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
 		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
 		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
 		return;
 	}
 
-	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+	cqn = dw0 & QM_EQE_CQN_MASK;
 	if (unlikely(cqn >= qm->qp_num))
 		return;
 	poll_data = &qm->poll_data[cqn];
 
-	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
-		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-		poll_data->qp_finish_id[eqe_num] = cqn;
+	while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
+		poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
 		eqe_num++;
 
 		if (qm->status.eq_head == eq_depth - 1) {
@@ -1006,6 +1006,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 
 		if (eqe_num == (eq_depth >> 1) - 1)
 			break;
+
+		dw0 = le32_to_cpu(eqe->dw0);
 	}
 
 	poll_data->eqe_num = eqe_num;
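The EQ hunks above and the AEQ change below share one discipline: snapshot the descriptor word once per loop iteration and derive both the phase bit and the queue number from that snapshot, rather than re-reading a DMA-visible field several times. Reading the word once keeps the phase test and the payload decode consistent even if the device updates the entry between the two. A minimal userspace sketch of the phase-bit ring pattern, with made-up names (struct ring, ring_pop) rather than the driver's types:

#include <stdint.h>

#define ENTRY_PHASE(dw0)  (((dw0) >> 16) & 0x1) /* same shape as QM_EQE_PHASE(dw0) */
#define ENTRY_CQN_MASK    0xffffU

struct ring {
	volatile const uint32_t *entries; /* device-written descriptor words */
	uint16_t depth;
	uint16_t head;
	uint8_t phase;                    /* expected phase bit of valid entries */
};

/* Pop one entry; returns 0 and stores the queue number, or -1 if empty. */
static int ring_pop(struct ring *r, uint16_t *cqn)
{
	uint32_t dw0 = r->entries[r->head]; /* single read per entry */

	if (ENTRY_PHASE(dw0) != r->phase)
		return -1; /* entry not yet published by the device */

	*cqn = dw0 & ENTRY_CQN_MASK;
	if (r->head == r->depth - 1) {
		r->phase = !r->phase; /* phase flips on each wrap-around */
		r->head = 0;
	} else {
		r->head++;
	}

	return 0;
}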
@@ -1098,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 {
 	struct hisi_qm *qm = data;
 	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+	u32 dw0 = le32_to_cpu(aeqe->dw0);
 	u16 aeq_depth = qm->aeq_depth;
 	u32 type, qp_id;
 
 	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
-	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
-		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
-			QM_AEQE_TYPE_MASK;
-		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+	while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
+		type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
+		qp_id = dw0 & QM_AEQE_CQN_MASK;
 
 		switch (type) {
 		case QM_EQ_OVERFLOW:
@@ -1134,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 			aeqe++;
 			qm->status.aeq_head++;
 		}
+		dw0 = le32_to_cpu(aeqe->dw0);
 	}
 
 	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
@@ -1283,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
 		}
 		break;
+	/*
+	 * Note: the current logic only needs to handle the above three
+	 * types. If new types are added, they must be handled here as
+	 * well; otherwise, undefined behavior may occur.
+	 */
+	default:
+		break;
 	}
 }
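The new default branch documents that the switch is intentionally exhaustive. An illustrative sketch with invented names (enum vft_kind, vft_value), not the driver's types: without the default, a value added to the enum later would silently fall through the switch and the caller would consume an unconfigured result.

enum vft_kind { KIND_A, KIND_B, KIND_C };

static unsigned long vft_value(enum vft_kind kind, unsigned long base)
{
	unsigned long v = 0;

	switch (kind) {
	case KIND_A:
		v = base | 0x1;
		break;
	case KIND_B:
		v = base | 0x2;
		break;
	case KIND_C:
		v = base | 0x4;
		break;
	default: /* new kinds must be handled above before v is trusted */
		break;
	}

	return v;
}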
@@ -2652,10 +2662,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm)
 		}
 	}
 	list_add(&hw_err->list, &isolate->qm_hw_errs);
-	mutex_unlock(&isolate->isolate_lock);
 
 	if (count >= isolate->err_threshold)
 		isolate->is_isolate = true;
+	mutex_unlock(&isolate->isolate_lock);
 
 	return 0;
 }
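Moving the unlock below the threshold check puts the count comparison and the is_isolate update in the same critical section as the list insertion. A small pthreads sketch of the hazard, with illustrative names (err_tracker, record_err); the kernel code uses mutex_lock/mutex_unlock, pthreads merely stands in here:

#include <pthread.h>
#include <stdbool.h>

struct err_tracker {
	pthread_mutex_t lock;
	unsigned int count;     /* stands in for the qm_hw_errs list length */
	unsigned int threshold;
	bool isolated;
};

static struct err_tracker tracker = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.threshold = 4,
};

static void record_err(struct err_tracker *t)
{
	pthread_mutex_lock(&t->lock);
	t->count++;
	/*
	 * Decide under the same lock that protected the update. If the
	 * unlock came first (as before the patch), another thread could
	 * modify the list between the unlock and this comparison, and the
	 * isolation decision would be made on a stale count.
	 */
	if (t->count >= t->threshold)
		t->isolated = true;
	pthread_mutex_unlock(&t->lock);
}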
@@ -2664,12 +2674,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm)
 {
 	struct qm_hw_err *err, *tmp;
 
-	mutex_lock(&qm->isolate_data.isolate_lock);
 	list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
 		list_del(&err->list);
 		kfree(err);
 	}
-	mutex_unlock(&qm->isolate_data.isolate_lock);
 }
 
 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
@@ -2697,10 +2705,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
 	if (qm->isolate_data.is_isolate)
 		return -EPERM;
 
+	mutex_lock(&qm->isolate_data.isolate_lock);
 	qm->isolate_data.err_threshold = num;
 
 	/* After the policy is updated, the hardware error list needs to be reset */
 	qm_hw_err_destroy(qm);
+	mutex_unlock(&qm->isolate_data.isolate_lock);
 
 	return 0;
 }
@@ -2737,7 +2747,10 @@ static void qm_remove_uacce(struct hisi_qm *qm)
 	struct uacce_device *uacce = qm->uacce;
 
 	if (qm->use_sva) {
+		mutex_lock(&qm->isolate_data.isolate_lock);
 		qm_hw_err_destroy(qm);
+		mutex_unlock(&qm->isolate_data.isolate_lock);
+
 		uacce_remove(uacce);
 		qm->uacce = NULL;
 	}
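Taken together, the three hunks above move the locking out of qm_hw_err_destroy and into its callers, so a caller can bundle the list flush with its own update in one critical section: the threshold write and the history reset become atomic with respect to qm_hw_err_isolate. A sketch of that caller-held-lock convention, reusing the hypothetical err_tracker type from the locking sketch above:

/* Caller must hold t->lock, mirroring the new qm_hw_err_destroy contract. */
static void tracker_flush_locked(struct err_tracker *t)
{
	t->count = 0; /* stands in for freeing every entry on the error list */
}

static void tracker_set_threshold(struct err_tracker *t, unsigned int num)
{
	pthread_mutex_lock(&t->lock);
	t->threshold = num;
	tracker_flush_locked(t); /* policy change and history reset are atomic */
	pthread_mutex_unlock(&t->lock);
}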
@@ -3678,6 +3691,7 @@ static void qm_clear_vft_config(struct hisi_qm *qm)
 
 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 {
 	struct device *dev = &qm->pdev->dev;
+	struct qm_shaper_factor t_factor;
 	u32 ir = qos * QM_QOS_RATE;
 	int ret, total_vfs, i;
@@ -3685,6 +3699,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 	if (fun_index > total_vfs)
 		return -EINVAL;
 
+	memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
 	qm->factor[fun_index].func_qos = qos;
 
 	ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
@@ -3698,11 +3713,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
 		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
 		if (ret) {
 			dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
-			return -EINVAL;
+			goto back_func_qos;
 		}
 	}
 
 	return 0;
+
+back_func_qos:
+	memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
+	for (i--; i >= ALG_TYPE_0; i--) {
+		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+		if (ret)
+			dev_err(dev, "failed to restore shaper vft during rollback!\n");
+	}
+
+	return -EINVAL;
 }
 
 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
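The new back_func_qos path restores the snapshot taken before the QoS update and re-programs the table entries that had already been written with the new value, instead of returning with the hardware half-configured. A compact sketch of this snapshot-and-roll-back shape, with invented names (struct cfg, apply_one, apply_all) standing in for the driver's factor table and qm_set_vft_common():

#include <string.h>

#define MAX_STEPS 2 /* stands in for the ALG_TYPE_0..ALG_TYPE_1 range */

struct cfg { int qos; };

/* Stands in for programming one hardware table entry; second step fails. */
static int apply_one(const struct cfg *c, int step)
{
	(void)c;
	return step == 1 ? -1 : 0;
}

static int apply_all(struct cfg *cur, int qos)
{
	struct cfg saved;
	int i;

	memcpy(&saved, cur, sizeof(saved)); /* snapshot before mutating */
	cur->qos = qos;

	for (i = 0; i < MAX_STEPS; i++) {
		if (apply_one(cur, i))
			goto rollback;
	}

	return 0;

rollback:
	memcpy(cur, &saved, sizeof(saved)); /* restore the software copy */
	for (i--; i >= 0; i--)              /* re-program completed steps */
		apply_one(cur, i);

	return -1;
}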
@@ -3871,10 +3896,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
 	pdev = container_of(dev, struct pci_dev, dev);
 	if (pci_physfn(pdev) != qm->pdev) {
 		pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+		put_device(dev);
 		return -EINVAL;
 	}
 
 	*fun_index = pdev->devfn;
+	put_device(dev);
 
 	return 0;
 }
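Kernel lookups such as bus_find_device_by_name() return the device with its reference count raised, so after a successful lookup every exit path, error or success, must drop that reference; this hunk adds the missing put_device() on both. A toy model of the get/put discipline; struct device, dev_lookup and put_device below are stand-ins, not the kernel API:

#include <stddef.h>

struct device { int refcnt; }; /* toy stand-in for the kernel's struct */

static struct device global_dev;

/* Stands in for a lookup that returns a held reference, or NULL. */
static struct device *dev_lookup(int found)
{
	if (!found)
		return NULL;
	global_dev.refcnt++;
	return &global_dev;
}

static void put_device(struct device *d) /* toy stand-in */
{
	d->refcnt--;
}

static int use_device(int found, int valid)
{
	struct device *dev = dev_lookup(found);

	if (!dev)
		return -1; /* nothing held, nothing to put */

	if (!valid) {
		put_device(dev); /* the error path must drop the reference too */
		return -1;
	}

	/* ... consume dev ... */
	put_device(dev); /* the success path drops it as well */
	return 0;
}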