Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c       6
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c   133
-rw-r--r--  drivers/ufs/core/ufshcd.c      103
-rw-r--r--  drivers/ufs/host/ufs-qcom.c    181
-rw-r--r--  drivers/ufs/host/ufs-qcom.h     11
5 files changed, 395 insertions(+), 39 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index f1294c29f484..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,7 +674,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
- unsigned long flags;
int err;
/* Skip task abort in case previous aborts failed and report failure */
@@ -713,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
return FAILED;
}
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
return SUCCESS;
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 634cf163f4cb..de8b6acd4058 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
}
}
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+ switch (hint) {
+ case WB_RESIZE_HINT_KEEP:
+ return "keep";
+ case WB_RESIZE_HINT_DECREASE:
+ return "decrease";
+ case WB_RESIZE_HINT_INCREASE:
+ return "increase";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+ switch (status) {
+ case WB_RESIZE_STATUS_IDLE:
+ return "idle";
+ case WB_RESIZE_STATUS_IN_PROGRESS:
+ return "in_progress";
+ case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+ return "complete_success";
+ case WB_RESIZE_STATUS_GENERAL_FAILURE:
+ return "general_failure";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+static const char * const wb_resize_en_mode[] = {
+ [WB_RESIZE_EN_IDLE] = "idle",
+ [WB_RESIZE_EN_DECREASE] = "decrease",
+ [WB_RESIZE_EN_INCREASE] = "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(wb_resize_en_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_set_resize_en(hba, mode);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
/**
* pm_qos_enable_show - sysfs handler to show pm qos enable value
* @dev: device associated with the UFS controller
@@ -526,6 +594,7 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
@@ -543,6 +612,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_wb_resize_enable.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
@@ -1549,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+ enum attr_idn idn, u32 *attr_val)
+{
+ u8 index = 0;
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ index = ufshcd_wb_get_query_index(hba);
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ idn, index, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1622,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_wb_resize_hint.attr,
+ &dev_attr_wb_resize_status.attr,
NULL,
};
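The three entries added above are plain sysfs attributes, so they can be exercised from userspace with ordinary file I/O. Below is a minimal C sketch; the controller path is a hypothetical placeholder (the real location is platform specific), and it assumes wb_resize_hint/wb_resize_status appear in the device's "attributes" group while the write-only wb_resize_enable sits alongside the other controller attributes.

#include <stdio.h>

/* Hypothetical path; the actual UFS controller sysfs directory differs per platform. */
#define UFS_SYSFS "/sys/devices/platform/soc@0/1d84000.ufshc/"

static void show(const char *attr)
{
	char path[256], val[64] = "";
	FILE *f;

	snprintf(path, sizeof(path), UFS_SYSFS "%s", attr);
	f = fopen(path, "r");
	if (f && fgets(val, sizeof(val), f))
		printf("%s = %s", attr, val);
	if (f)
		fclose(f);
}

int main(void)
{
	show("attributes/wb_resize_hint");	/* "keep", "decrease", "increase" or "unknown" */
	show("attributes/wb_resize_status");	/* "idle", "in_progress", ... */
	/* wb_resize_enable is write-only: write "idle", "decrease" or "increase" to it. */
	return 0;
}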
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 7735421e3991..76cedd30c274 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
- UIC_CMD_TIMEOUT_MAX = 2000,
+ UIC_CMD_TIMEOUT_MAX = 5000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
@@ -63,7 +63,11 @@ enum {
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
- "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
@@ -432,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
+ u32 hwq_id = 0;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -644,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -3365,7 +3383,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3462,7 +3480,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3558,7 +3576,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -6020,7 +6038,7 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: failed to read device level exception %d\n",
@@ -6107,6 +6125,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
@@ -6572,7 +6605,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
@@ -7001,7 +7034,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -7009,16 +7042,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -7042,7 +7073,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -7050,6 +7081,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+
+ /* Move interrupt handling to thread when MCQ & ESI are not enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Directly handle interrupts since MCQ ESI handlers do the hard job */
+ return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+}
+
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
@@ -7245,7 +7299,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -8107,6 +8161,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8678,7 +8735,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8793,6 +8850,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -10654,7 +10712,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
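The IRQ registration change above follows the standard primary/threaded split: the hard handler either completes the work itself or returns IRQ_WAKE_THREAD to defer to the threaded handler. A generic sketch of that pattern, with purely illustrative demo_* names (not the driver's actual code):

#include <linux/interrupt.h>

struct demo_dev;					/* illustrative device context */
static bool demo_fast_path(struct demo_dev *dd);	/* illustrative helpers */
static irqreturn_t demo_handle(struct demo_dev *dd);

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* Cheap case: complete handling directly in hard IRQ context. */
	if (demo_fast_path(dd))
		return demo_handle(dd);

	/* Expensive case: wake the threaded handler below. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_threadfn(int irq, void *data)
{
	return demo_handle(data);
}

/*
 * Registration, typically from probe():
 *	devm_request_threaded_irq(dev, irq, demo_hardirq, demo_threadfn,
 *				  IRQF_ONESHOT | IRQF_SHARED, "demo", dd);
 */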
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 77f8ddb1f207..37887ec68412 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/gpio/consumer.h>
@@ -102,6 +103,24 @@ static const struct __ufs_qcom_bw_table {
[MODE_MAX][0][0] = { 7643136, 819200 },
};
+static const struct {
+ int nminor;
+ char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+ [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"},
+ [TSTBUS_UARM] = {32, "TSTBUS_UARM"},
+ [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"},
+ [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"},
+ [TSTBUS_DFC] = {32, "TSTBUS_DFC"},
+ [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"},
+ [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"},
+ [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"},
+ [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"},
+ [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+ [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"},
+ [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"},
+};
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
@@ -173,7 +192,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
- profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
profile->dev = dev;
/*
@@ -221,17 +240,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
ufshcd_hold(hba);
- err = qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ err = qcom_ice_program_key(host->ice, slot, key);
ufshcd_release(hba);
return err;
}
@@ -250,9 +260,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
return err;
}
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+ sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
.keyslot_program = ufs_qcom_ice_keyslot_program,
.keyslot_evict = ufs_qcom_ice_keyslot_evict,
+ .derive_sw_secret = ufs_qcom_ice_derive_sw_secret,
+ .import_key = ufs_qcom_ice_import_key,
+ .generate_key = ufs_qcom_ice_generate_key,
+ .prepare_key = ufs_qcom_ice_prepare_key,
};
#else
@@ -1609,6 +1663,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
return 0;
}
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int i, j, nminor = 0, testbus_len = 0;
+ u32 *testbus __free(kfree) = NULL;
+ char *prefix;
+
+ testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ for (j = 0; j < TSTBUS_MAX; j++) {
+ nminor = testbus_info[j].nminor;
+ prefix = testbus_info[j].prefix;
+ host->testbus.select_major = j;
+ testbus_len = nminor * sizeof(u32);
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ }
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix, enum ufshcd_res id)
+{
+ u32 *regs __free(kfree) = NULL;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0)
+ return -EINVAL;
+
+ regs = kzalloc(len, GFP_ATOMIC);
+ if (!regs)
+ return -ENOMEM;
+
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, regs, len, false);
+
+ return 0;
+}
+
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+ struct dump_info {
+ size_t offset;
+ size_t len;
+ const char *prefix;
+ enum ufshcd_res id;
+ };
+
+ struct dump_info mcq_dumps[] = {
+ {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+ {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+ {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+ {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+ {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+ {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+ {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+ {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+ {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+ {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+ {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+ ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+ mcq_dumps[i].prefix, mcq_dumps[i].id);
+ cond_resched();
+ }
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
u32 reg;
@@ -1616,6 +1749,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
host = ufshcd_get_variant(hba);
+ dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+ dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+ dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+ ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
@@ -1658,6 +1800,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+ if (hba->mcq_enabled) {
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+ }
+
+ /* ensure below dumps occur only in task context due to blocking calls. */
+ if (in_task()) {
+ /* Dump MCQ Host Vendor Specific Registers */
+ if (hba->mcq_enabled)
+ ufs_qcom_dump_mcq_hci_regs(hba);
+
+ /* voluntarily yield the CPU as we are dumping too much data */
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+ cond_resched();
+ ufs_qcom_dump_testbus(hba);
+ }
}
/**
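The dump helpers added above (ufs_qcom_dump_regs(), ufs_qcom_dump_testbus()) lean on the scope-based cleanup support from <linux/cleanup.h>, where a pointer annotated with __free(kfree) is freed automatically on every exit path. A minimal sketch of the idiom, with an illustrative function and buffer size:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo_collect(size_t n)
{
	/* kfree(buf) runs automatically when buf goes out of scope. */
	u32 *buf __free(kfree) = kzalloc(n * sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... fill and consume buf ... */

	return 0;	/* no explicit kfree() needed on any return path */
}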
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 05d4cb569c50..0a5cfc2dd4f7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,8 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ UFS_RD_REG_MCQ = 0xD00,
+
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -75,6 +77,15 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+/* QCOM UFS HC vendor specific Hibern8 count registers */
+enum {
+ REG_UFS_HW_H8_ENTER_CNT = 0x2700,
+ REG_UFS_SW_H8_ENTER_CNT = 0x2704,
+ REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708,
+ REG_UFS_HW_H8_EXIT_CNT = 0x270C,
+ REG_UFS_SW_H8_EXIT_CNT = 0x2710,
+};
+
enum {
UFS_MEM_CQIS_VS = 0x8,
};