Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c      |  18
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c    | 187
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h  |   1
-rw-r--r--  drivers/ufs/core/ufshcd.c       | 226
-rw-r--r--  drivers/ufs/host/ufs-exynos.c   |  85
-rw-r--r--  drivers/ufs/host/ufs-exynos.h   |   6
-rw-r--r--  drivers/ufs/host/ufs-qcom.c     | 309
-rw-r--r--  drivers/ufs/host/ufs-qcom.h     |  29
8 files changed, 730 insertions(+), 131 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,16 +674,8 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	int tag = scsi_cmd_to_rq(cmd)->tag;
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	struct ufs_hw_queue *hwq;
-	unsigned long flags;
 	int err;
 
-	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
-		dev_err(hba->dev,
-			"%s: skip abort. cmd at tag %d already completed.\n",
-			__func__, tag);
-		return FAILED;
-	}
-
 	/* Skip task abort in case previous aborts failed and report failure */
 	if (lrbp->req_abort_skip) {
 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +684,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	}
 
 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	if (!hwq) {
+		dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+			__func__, tag);
+		return FAILED;
+	}
 
 	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
 		/*
@@ -715,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 		return FAILED;
 	}
 
-	spin_lock_irqsave(&hwq->cq_lock, flags);
-	if (ufshcd_cmd_inflight(lrbp->cmd))
-		ufshcd_release_scsi_cmd(hba, lrbp);
-	spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
 	return SUCCESS;
 }
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 90b5ab60f5ae..de8b6acd4058 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
 	}
 }
 
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+	switch (hint) {
+	case WB_RESIZE_HINT_KEEP:
+		return "keep";
+	case WB_RESIZE_HINT_DECREASE:
+		return "decrease";
+	case WB_RESIZE_HINT_INCREASE:
+		return "increase";
+	default:
+		return "unknown";
+	}
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+	switch (status) {
+	case WB_RESIZE_STATUS_IDLE:
+		return "idle";
+	case WB_RESIZE_STATUS_IN_PROGRESS:
+		return "in_progress";
+	case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+		return "complete_success";
+	case WB_RESIZE_STATUS_GENERAL_FAILURE:
+		return "general_failure";
+	default:
+		return "unknown";
+	}
+}
+
 static const char *ufshcd_uic_link_state_to_string(
 			enum uic_link_state state)
 {
@@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
 	return count;
 }
 
+static const char * const wb_resize_en_mode[] = {
+	[WB_RESIZE_EN_IDLE]	= "idle",
+	[WB_RESIZE_EN_DECREASE]	= "decrease",
+	[WB_RESIZE_EN_INCREASE]	= "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int mode;
+	ssize_t res;
+
+	if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+		|| !hba->dev_info.b_presrv_uspc_en
+		|| !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+		return -EOPNOTSUPP;
+
+	mode = sysfs_match_string(wb_resize_en_mode, buf);
+	if (mode < 0)
+		return -EINVAL;
+
+	down(&hba->host_sem);
+	if (!ufshcd_is_user_access_allowed(hba)) {
+		res = -EBUSY;
+		goto out;
+	}
+
+	ufshcd_rpm_get_sync(hba);
+	res = ufshcd_wb_set_resize_en(hba, mode);
+	ufshcd_rpm_put_sync(hba);
+
+out:
+	up(&hba->host_sem);
+	return res < 0 ? res : count;
+}
+
 /**
  * pm_qos_enable_show - sysfs handler to show pm qos enable value
  * @dev: device associated with the UFS controller
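Note: wb_resize_enable_store() above maps the written token onto an index with sysfs_match_string(), so the accepted values are exactly the strings in wb_resize_en_mode[], and the matched index doubles as the wb_resize_en enum value passed to ufshcd_wb_set_resize_en(). A minimal, self-contained sketch of that idiom (the "demo_*" names are invented for illustration and are not part of the patch):

	#include <linux/device.h>
	#include <linux/string.h>

	static const char * const demo_modes[] = { "idle", "decrease", "increase" };

	static ssize_t demo_mode_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		/* returns the matching index; a trailing newline is tolerated */
		int mode = sysfs_match_string(demo_modes, buf);

		if (mode < 0)
			return -EINVAL;
		/* ... act on mode (0, 1 or 2) ... */
		return count;
	}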
@@ -466,6 +534,56 @@ static ssize_t critical_health_show(struct device *dev,
 	return sysfs_emit(buf, "%d\n", hba->critical_health_count);
 }
 
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned int value;
+
+	if (kstrtouint(buf, 0, &value))
+		return -EINVAL;
+
+	/* the only supported usecase is to reset the dev_lvl_exception_count */
+	if (value)
+		return -EINVAL;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+
+	return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u64 exception_id;
+	int err;
+
+	ufshcd_rpm_get_sync(hba);
+	err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+	ufshcd_rpm_put_sync(hba);
+
+	if (err)
+		return err;
+
+	hba->dev_lvl_exception_id = exception_id;
+	return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
 static DEVICE_ATTR_RW(rpm_lvl);
 static DEVICE_ATTR_RO(rpm_target_dev_state);
 static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -476,9 +594,12 @@ static DEVICE_ATTR_RW(auto_hibern8);
 static DEVICE_ATTR_RW(wb_on);
 static DEVICE_ATTR_RW(enable_wb_buf_flush);
 static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
 static DEVICE_ATTR_RW(rtc_update_ms);
 static DEVICE_ATTR_RW(pm_qos_enable);
 static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
 
 static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_rpm_lvl.attr,
@@ -491,9 +612,12 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_wb_on.attr,
 	&dev_attr_enable_wb_buf_flush.attr,
 	&dev_attr_wb_flush_threshold.attr,
+	&dev_attr_wb_resize_enable.attr,
 	&dev_attr_rtc_update_ms.attr,
 	&dev_attr_pm_qos_enable.attr,
 	&dev_attr_critical_health.attr,
+	&dev_attr_device_lvl_exception_count.attr,
+	&dev_attr_device_lvl_exception_id.attr,
 	NULL
 };
 
@@ -1495,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
 		idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
 }
 
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+				enum attr_idn idn, u32 *attr_val)
+{
+	u8 index = 0;
+	int ret;
+
+	if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+		|| !hba->dev_info.b_presrv_uspc_en
+		|| !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+		return -EOPNOTSUPP;
+
+	down(&hba->host_sem);
+	if (!ufshcd_is_user_access_allowed(hba)) {
+		up(&hba->host_sem);
+		return -EBUSY;
+	}
+
+	index = ufshcd_wb_get_query_index(hba);
+	ufshcd_rpm_get_sync(hba);
+	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				idn, index, 0, attr_val);
+	ufshcd_rpm_put_sync(hba);
+
+	up(&hba->host_sem);
+	return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+	u32 value;
+
+	ret = wb_read_resize_attrs(hba,
+			QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+	u32 value;
+
+	ret = wb_read_resize_attrs(hba,
+			QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
+
 #define UFS_ATTRIBUTE(_name, _uname)					\
 static ssize_t _name##_show(struct device *dev,			\
 	struct device_attribute *attr, char *buf)			\
@@ -1568,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
 	&dev_attr_wb_avail_buf.attr,
 	&dev_attr_wb_life_time_est.attr,
 	&dev_attr_wb_cur_buf.attr,
+	&dev_attr_wb_resize_hint.attr,
+	&dev_attr_wb_resize_status.attr,
 	NULL,
 };
 
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 10b4a19a70f1..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 			     enum query_opcode desc_op);
 
 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
 
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0534390c2a35..76cedd30c274 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
 /* UIC command timeout, unit: ms */
 enum {
 	UIC_CMD_TIMEOUT_DEFAULT	= 500,
-	UIC_CMD_TIMEOUT_MAX	= 2000,
+	UIC_CMD_TIMEOUT_MAX	= 5000,
 };
 
 /* NOP OUT retries waiting for NOP IN response */
 #define NOP_OUT_RETRIES    10
@@ -63,7 +63,11 @@ enum {
 /* Query request retries */
 #define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+	QUERY_REQ_TIMEOUT_MIN     = 1,
+	QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+	QUERY_REQ_TIMEOUT_MAX     = 30000
+};
 
 /* Advanced RPMB request timeout */
 #define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
 
 module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
 MODULE_PARM_DESC(uic_cmd_timeout,
-		 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+		 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+	return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+				     QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+	.set = dev_cmd_timeout_set,
+	.get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+		 "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
 
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
 	({                                                              \
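Note: dev_cmd_timeout follows the same clamped-module-parameter idiom as the existing uic_cmd_timeout above it: a custom .set hook validates the range, so an out-of-range write to /sys/module/.../parameters fails with -EINVAL instead of being silently accepted. A sketch of the idiom in isolation (the "demo_timeout" name and its bounds are chosen here for illustration):

	#include <linux/moduleparam.h>

	static unsigned int demo_timeout = 1500;

	static int demo_timeout_set(const char *val, const struct kernel_param *kp)
	{
		/* rejects anything outside [1, 30000] with -EINVAL */
		return param_set_uint_minmax(val, kp, 1, 30000);
	}

	static const struct kernel_param_ops demo_timeout_ops = {
		.set = demo_timeout_set,
		.get = param_get_uint,
	};

	/* 0644: root may also change it at runtime through sysfs */
	module_param_cb(demo_timeout, &demo_timeout_ops, &demo_timeout, 0644);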
@@ -278,6 +299,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
 		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+		   UFS_DEVICE_QUIRK_PA_HIBER8TIME |
 		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = UFS_ANY_MODEL,
@@ -431,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
 	u8 opcode = 0, group_id = 0;
 	u32 doorbell = 0;
 	u32 intr;
-	int hwq_id = -1;
+	u32 hwq_id = 0;
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	struct scsi_cmnd *cmd = lrbp->cmd;
 	struct request *rq = scsi_cmd_to_rq(cmd);
@@ -643,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
 		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
 		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
 		hba->ufs_stats.hibern8_exit_cnt);
-	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
-		div_u64(hba->ufs_stats.last_intr_ts, 1000),
-		hba->ufs_stats.last_intr_status);
 	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
 		hba->eh_flags, hba->req_abort_count);
 	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -3176,16 +3195,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 	int err;
 
 retry:
-	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+	time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
 						time_left);
 
 	if (likely(time_left)) {
-		/*
-		 * The completion handler called complete() and the caller of
-		 * this function still owns the @lrbp tag so the code below does
-		 * not trigger any race conditions.
-		 */
-		hba->dev_cmd.complete = NULL;
 		err = ufshcd_get_tr_ocs(lrbp, NULL);
 		if (!err)
 			err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3199,7 +3212,6 @@ retry:
 			/* successfully cleared the command, retry if needed */
 			if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
 				err = -EAGAIN;
-			hba->dev_cmd.complete = NULL;
 			return err;
 		}
 
@@ -3215,11 +3227,9 @@ retry:
 
 		spin_lock_irqsave(&hba->outstanding_lock, flags);
 		pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs);
-		if (pending) {
-			hba->dev_cmd.complete = NULL;
+		if (pending)
 			__clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
-		}
 		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 		if (!pending) {
@@ -3237,8 +3247,6 @@ retry:
 
 		spin_lock_irqsave(&hba->outstanding_lock, flags);
 		pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs);
-		if (pending)
-			hba->dev_cmd.complete = NULL;
 		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 		if (!pending) {
@@ -3272,13 +3280,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 				const u32 tag, int timeout)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	int err;
 
-	hba->dev_cmd.complete = &wait;
-
 	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
 	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
@@ -3379,7 +3383,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	struct ufs_query_req *request = NULL;
 	struct ufs_query_res *response = NULL;
 	int err, selector = 0;
-	int timeout = QUERY_REQ_TIMEOUT;
+	int timeout = dev_cmd_timeout;
 
 	BUG_ON(!hba);
 
@@ -3476,7 +3480,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err) {
 		dev_err(hba->dev,
 			"%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3572,7 +3576,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err) {
 		dev_err(hba->dev,
 			"%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -5585,12 +5589,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 		ufshcd_release_scsi_cmd(hba, lrbp);
 		/* Do not touch lrbp after scsi done */
 		scsi_done(cmd);
-	} else if (hba->dev_cmd.complete) {
+	} else {
 		if (cqe) {
 			ocs = le32_to_cpu(cqe->status) & MASK_OCS;
 			lrbp->utr_descriptor_ptr->header.ocs = ocs;
 		}
-		complete(hba->dev_cmd.complete);
+		complete(&hba->dev_cmd.complete);
 	}
 }
 
@@ -5692,6 +5696,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
 			continue;
 
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+		if (!hwq)
+			continue;
 
 		if (force_compl) {
 			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -6013,6 +6019,42 @@ out:
 			 __func__, err);
 }
 
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+	struct utp_upiu_query_v4_0 *upiu_resp;
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	ufshcd_hold(hba);
+	mutex_lock(&hba->dev_cmd.lock);
+
+	ufshcd_init_query(hba, &request, &response,
+			  UPIU_QUERY_OPCODE_READ_ATTR,
+			  QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+	request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
+
+	if (err) {
+		dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+	*exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
+
+	return err;
+}
+
 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
 {
 	u8 index;
@@ -6083,7 +6125,22 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
 	return ret;
 }
 
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+	int ret;
+	u8 index;
+
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+	if (ret)
+		dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
 						u32 avail_buf)
 {
 	u32 cur_buf;
@@ -6165,15 +6222,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	}
 
 	/*
-	 * The ufs device needs the vcc to be ON to flush.
 	 * With user-space reduction enabled, it's enough to enable flush
 	 * by checking only the available buffer. The threshold
 	 * defined here is > 90% full.
 	 * With user-space preserved enabled, the current-buffer
 	 * should be checked too because the wb buffer size can reduce
 	 * when disk tends to be full. This info is provided by current
-	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
-	 * keeping vcc on when current buffer is empty.
+	 * buffer (dCurrentWriteBoosterBufferSize).
 	 */
 	index = ufshcd_wb_get_query_index(hba);
 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6188,7 +6243,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	if (!hba->dev_info.b_presrv_uspc_en)
 		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
 
-	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+	return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
 }
 
 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6240,6 +6295,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
 	}
 
+	if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+		atomic_inc(&hba->dev_lvl_exception_count);
+		sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+	}
+
 	ufs_debugfs_exception_event(hba, status);
 }
 
@@ -6545,7 +6605,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eh_work);
 
 	dev_info(hba->dev,
-		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
 		 __func__, ufshcd_state_name[hba->ufshcd_state],
 		 hba->is_powered, hba->shutting_down, hba->saved_err,
 		 hba->saved_uic_err, hba->force_reset,
@@ -6974,7 +7034,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 }
 
 /**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
  * @irq: irq number
  * @__hba: pointer to adapter instance
  *
@@ -6982,16 +7042,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  * IRQ_HANDLED - If interrupt is valid
  * IRQ_NONE    - If invalid interrupt
  */
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
 {
-	u32 intr_status, enabled_intr_status = 0;
+	u32 last_intr_status, intr_status, enabled_intr_status = 0;
 	irqreturn_t retval = IRQ_NONE;
 	struct ufs_hba *hba = __hba;
 	int retries = hba->nutrs;
 
-	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-	hba->ufs_stats.last_intr_status = intr_status;
-	hba->ufs_stats.last_intr_ts = local_clock();
+	last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
 	/*
 	 * There could be max of hba->nutrs reqs in flight and in worst case
@@ -7015,7 +7073,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
 					__func__,
 					intr_status,
-					hba->ufs_stats.last_intr_status,
+					last_intr_status,
 					enabled_intr_status);
 		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 	}
@@ -7023,6 +7081,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	return retval;
 }
 
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED     - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to the threaded handler
+ * IRQ_NONE        - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+	struct ufs_hba *hba = __hba;
+
+	/* Move interrupt handling to thread when MCQ & ESI are not enabled */
+	if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+		return IRQ_WAKE_THREAD;
+
+	/* Directly handle interrupts since the MCQ ESI handlers do the hard job */
+	return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
+				   ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+}
+
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
 {
 	int err = 0;
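Note: the interrupt path is now split into a primary and a threaded handler. The primary handler runs in hard-IRQ context and either finishes the work itself (MCQ with ESI enabled) or returns IRQ_WAKE_THREAD to defer to the thread, where blocking work such as the retry loop is legal. A compilable sketch of that contract, assuming hypothetical demo_* helpers:

	#include <linux/interrupt.h>

	static bool demo_fast_path_ok(void *data) { return false; }	/* placeholder */

	static irqreturn_t demo_hardirq(int irq, void *data)
	{
		if (!demo_fast_path_ok(data))
			return IRQ_WAKE_THREAD;	/* run demo_thread_fn() next */
		return IRQ_HANDLED;		/* handled entirely in hard-IRQ */
	}

	static irqreturn_t demo_thread_fn(int irq, void *data)
	{
		/* sleepable context: may block, poll and retry */
		return IRQ_HANDLED;
	}

	static int demo_request(struct device *dev, int irq, void *ctx)
	{
		/* IRQF_ONESHOT keeps the line masked until the thread finishes */
		return devm_request_threaded_irq(dev, irq, demo_hardirq,
						 demo_thread_fn,
						 IRQF_ONESHOT | IRQF_SHARED,
						 "demo", ctx);
	}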
@@ -7218,7 +7299,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
 	 * read the response directly ignoring all errors.
 	 */
-	ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+	ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
 
 	/* just copy the upiu response as it is */
 	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7238,8 +7319,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 			err = -EINVAL;
 		}
 	}
-	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
-				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 	return err;
 }
@@ -8082,6 +8161,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
 	 */
 	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
 
+	dev_info->ext_wb_sup =  get_unaligned_be16(desc_buf +
+						DEVICE_DESC_PARAM_EXT_WB_SUP);
+
 	dev_info->b_presrv_uspc_en =
 		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
 
@@ -8139,6 +8221,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
 	}
 }
 
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+	u32 ext_ufs_feature;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return;
+
+	ext_ufs_feature = get_unaligned_be32(desc_buf +
+				DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+	if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+		return;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+	ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
 static void ufshcd_set_rtt(struct ufs_hba *hba)
 {
 	struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8339,6 +8437,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 
 	ufs_init_rtc(hba, desc_buf);
 
+	ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
 	/*
 	 * ufshcd_read_string_desc returns size of the string
 	 * reset the error value
@@ -8428,6 +8528,31 @@ out:
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+	u32 pa_h8time;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+		return;
+	}
+
+	/* Increment by 1 to increase hibernation time by 100 µs */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+	if (ret)
+		dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	ufshcd_vops_apply_dev_quirks(hba);
@@ -8438,6 +8563,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+		ufshcd_quirk_override_pa_h8time(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -8607,7 +8735,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
 
 	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err)
 		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8722,6 +8850,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
 	u32 intrs;
 
 	ret = ufshcd_mcq_vops_config_esi(hba);
+	hba->mcq_esi_enabled = !ret;
 	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
 
 	intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -10490,6 +10619,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 			UFS_SLEEP_PWR_MODE,
 			UIC_LINK_HIBERN8_STATE);
 
+	init_completion(&hba->dev_cmd.complete);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
@@ -10581,7 +10712,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
 	/* IRQ registration */
-	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+	err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+					IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
 		goto out_disable;
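Note: the new device-descriptor fields above (the extended WriteBooster support word, the extended UFS features dword) and the exception ID are big-endian and not necessarily naturally aligned in the response buffer, hence the get_unaligned_be16/be32/be64() accessors rather than plain casts. A small sketch of the pattern (the buffer layout and offsets below are invented for illustration):

	#include <linux/types.h>
	#include <linux/unaligned.h>	/* <asm/unaligned.h> on older kernels */

	static void demo_parse(const u8 *desc, u16 *wb, u32 *feat, u64 *id)
	{
		*wb   = get_unaligned_be16(desc + 4);	/* offsets made up here */
		*feat = get_unaligned_be32(desc + 8);
		*id   = get_unaligned_be64(desc + 16);
	}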
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index d7539cda97da..3e545af536e5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
  * Exynos's Vendor specific registers for UFSHCI
  */
 #define HCI_TXPRDT_ENTRY_SIZE	0x00
-#define PRDT_PREFECT_EN		BIT(31)
+#define PRDT_PREFETCH_EN	BIT(31)
 #define HCI_RXPRDT_ENTRY_SIZE	0x04
 #define HCI_1US_TO_CNT_VAL	0x0C
 #define CNT_VAL_1US_MASK	0x3FF
@@ -92,11 +92,16 @@
 				 UIC_TRANSPORT_NO_CONNECTION_RX |\
 				 UIC_TRANSPORT_BAD_TC)
 
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE		BIT(2)
-#define UFS_RD_SHARABLE		BIT(1)
-#define UFS_SHARABLE		(UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET	0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE	BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE	BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE		(UFS_EXYNOSAUTO_WR_SHARABLE | \
+					 UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE		BIT(1)
+#define UFS_GS101_RD_SHARABLE		BIT(0)
+#define UFS_GS101_SHARABLE		(UFS_GS101_WR_SHARABLE | \
+					 UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET		0x710
 
 /* Multi-host registers */
 #define MHCTRL			0xC4
@@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs)
 	/* IO Coherency setting */
 	if (ufs->sysreg) {
 		return regmap_update_bits(ufs->sysreg,
-					  ufs->shareability_reg_offset,
-					  UFS_SHARABLE, UFS_SHARABLE);
+					  ufs->iocc_offset,
+					  ufs->iocc_mask, ufs->iocc_val);
 	}
 
 	return 0;
@@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
 	}
 
 	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+
+	if (generic_phy->power_count) {
+		phy_power_off(generic_phy);
+		phy_exit(generic_phy);
+	}
+
 	ret = phy_init(generic_phy);
 	if (ret) {
 		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
 	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
 	exynos_ufs_set_unipro_pclk_div(ufs);
 
+	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
 	/* unipro */
 	exynos_ufs_config_unipro(ufs);
 
+	if (ufs->drv_data->pre_link)
+		ufs->drv_data->pre_link(ufs);
+
 	/* m-phy */
 	exynos_ufs_phy_init(ufs);
 	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
 		exynos_ufs_config_phy_cap_attr(ufs);
 	}
 
-	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
-	if (ufs->drv_data->pre_link)
-		ufs->drv_data->pre_link(ufs);
-
 	return 0;
 }
 
@@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
 	struct phy *generic_phy = ufs->phy;
 	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+	u32 val = ilog2(DATA_UNIT_SIZE);
 
 	exynos_ufs_establish_connt(ufs);
 	exynos_ufs_fit_aggr_timeout(ufs);
 
 	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
-	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+	if (hba->caps & UFSHCD_CAP_CRYPTO)
+		val |= PRDT_PREFETCH_EN;
+	hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
 	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
 	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
 	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
 		ufs->sysreg = NULL;
 	else {
 		if (of_property_read_u32_index(np, "samsung,sysreg", 1,
-					       &ufs->shareability_reg_offset)) {
+					       &ufs->iocc_offset)) {
 			dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
-			ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+			ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
 		}
 	}
 
+	ufs->iocc_mask = ufs->drv_data->iocc_mask;
+	/*
+	 * no 'dma-coherent' property means the descriptors are
+	 * non-cacheable so iocc shareability should be disabled.
+	 */
+	if (of_dma_is_coherent(dev->of_node))
+		ufs->iocc_val = ufs->iocc_mask;
+	else
+		ufs->iocc_val = 0;
+
 	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
 	ufs->pclk_avail_max = PCLK_AVAIL_MAX;
 
@@ -1497,6 +1523,14 @@ out:
 	return ret;
 }
 
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+	phy_power_off(ufs->phy);
+	phy_exit(ufs->phy);
+}
+
 static int exynos_ufs_host_reset(struct ufs_hba *hba)
 {
 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1667,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
 	}
 }
 
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+	return 0;
+}
+
 static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 	enum ufs_notify_change_status status)
 {
@@ -1675,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 	if (status == PRE_CHANGE)
 		return 0;
 
+	if (ufs->drv_data->suspend)
+		ufs->drv_data->suspend(ufs);
+
 	if (!ufshcd_is_link_active(hba))
 		phy_power_off(ufs->phy);
 
@@ -1952,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
 static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
 	.name				= "exynos_ufs",
 	.init				= exynos_ufs_init,
+	.exit				= exynos_ufs_exit,
 	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
 	.link_startup_notify		= exynos_ufs_link_startup_notify,
 	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
@@ -1990,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
 
 static void exynos_ufs_remove(struct platform_device *pdev)
 {
-	struct ufs_hba *hba = platform_get_drvdata(pdev);
-	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
 	ufshcd_pltfrm_remove(pdev);
-
-	phy_power_off(ufs->phy);
-	phy_exit(ufs->phy);
 }
 
 static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2035,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
 	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
 				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+	.iocc_mask		= UFS_EXYNOSAUTO_SHARABLE,
 	.drv_init		= exynosauto_ufs_drv_init,
 	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
 	.pre_link		= exynosauto_ufs_pre_link,
@@ -2136,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
 	.opts			= EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
 				  EXYNOS_UFS_OPT_UFSPR_SECURE |
 				  EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+	.iocc_mask		= UFS_GS101_SHARABLE,
 	.drv_init		= gs101_ufs_drv_init,
 	.pre_link		= gs101_ufs_pre_link,
 	.post_link		= gs101_ufs_post_link,
 	.pre_pwr_change		= gs101_ufs_pre_pwr_change,
+	.suspend		= gs101_ufs_suspend,
 };
 
 static const struct of_device_id exynos_ufs_of_match[] = {
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index aac517276189..abe7e472759e 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -181,6 +181,7 @@ struct exynos_ufs_drv_data {
 	struct exynos_ufs_uic_attr *uic_attr;
 	unsigned int quirks;
 	unsigned int opts;
+	u32 iocc_mask;
 	/* SoC's specific operations */
 	int (*drv_init)(struct exynos_ufs *ufs);
 	int (*pre_link)(struct exynos_ufs *ufs);
@@ -191,6 +192,7 @@ struct exynos_ufs_drv_data {
 			    const struct ufs_pa_layer_attr *pwr);
 	int (*pre_hce_enable)(struct exynos_ufs *ufs);
 	int (*post_hce_enable)(struct exynos_ufs *ufs);
+	int (*suspend)(struct exynos_ufs *ufs);
 };
 
 struct ufs_phy_time_cfg {
@@ -230,7 +232,9 @@ struct exynos_ufs {
 	ktime_t entry_hibern8_t;
 	const struct exynos_ufs_drv_data *drv_data;
 	struct regmap *sysreg;
-	u32 shareability_reg_offset;
+	u32 iocc_offset;
+	u32 iocc_mask;
+	u32 iocc_val;
 	u32 opts;
 #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL		BIT(0)
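Note: the Exynos IOCC handling above derives everything from the platform description: the bit mask now comes from the per-SoC drv_data (ExynosAuto and gs101 use different bits), and the value actually written is all-mask-bits only when the DT node carries 'dma-coherent'. A sketch of that decision in isolation (names invented for illustration):

	#include <linux/of.h>
	#include <linux/regmap.h>

	static int demo_apply_shareability(struct device *dev,
					   struct regmap *sysreg,
					   u32 offset, u32 mask)
	{
		/* non-coherent DMA -> leave the shareability bits cleared */
		u32 val = of_dma_is_coherent(dev->of_node) ? mask : 0;

		return regmap_update_bits(sysreg, offset, mask, val);
	}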
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 1b37449fbffc..37887ec68412 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
+#include <linux/cleanup.h>
 #include <linux/delay.h>
 #include <linux/devfreq.h>
 #include <linux/gpio/consumer.h>
@@ -33,6 +34,10 @@
 	((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
 #define MCQ_QCFG_SIZE	0x40
 
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB	0x04
+#define NO_DEEMPHASIS		0x0
+
 enum {
 	TSTBUS_UAWM,
 	TSTBUS_UARM,
@@ -98,6 +103,24 @@ static const struct __ufs_qcom_bw_table {
 	[MODE_MAX][0][0]		    = { 7643136,	819200 },
 };
 
+static const struct {
+	int nminor;
+	char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+	[TSTBUS_UAWM]     = {32, "TSTBUS_UAWM"},
+	[TSTBUS_UARM]     = {32, "TSTBUS_UARM"},
+	[TSTBUS_TXUC]     = {32, "TSTBUS_TXUC"},
+	[TSTBUS_RXUC]     = {32, "TSTBUS_RXUC"},
+	[TSTBUS_DFC]      = {32, "TSTBUS_DFC"},
+	[TSTBUS_TRLUT]    = {32, "TSTBUS_TRLUT"},
+	[TSTBUS_TMRLUT]   = {32, "TSTBUS_TMRLUT"},
+	[TSTBUS_OCSC]     = {32, "TSTBUS_OCSC"},
+	[TSTBUS_UTP_HCI]  = {32, "TSTBUS_UTP_HCI"},
+	[TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+	[TSTBUS_WRAPPER]  = {32, "TSTBUS_WRAPPER"},
+	[TSTBUS_UNIPRO]   = {256, "TSTBUS_UNIPRO"},
+};
+
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
 
@@ -169,7 +192,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
 
 	profile->ll_ops = ufs_qcom_crypto_ops;
 	profile->max_dun_bytes_supported = 8;
-	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+	profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
 	profile->dev = dev;
 
 	/*
@@ -217,17 +240,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
 
-	/* Only AES-256-XTS has been tested so far. */
-	if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
-		return -EOPNOTSUPP;
-
 	ufshcd_hold(hba);
-	err = qcom_ice_program_key(host->ice,
-				   QCOM_ICE_CRYPTO_ALG_AES_XTS,
-				   QCOM_ICE_CRYPTO_KEY_SIZE_256,
-				   key->bytes,
-				   key->crypto_cfg.data_unit_size / 512,
-				   slot);
+	err = qcom_ice_program_key(host->ice, slot, key);
 	ufshcd_release(hba);
 	return err;
 }
@@ -246,9 +260,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
 	return err;
 }
 
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+					 const u8 *eph_key, size_t eph_key_size,
+					 u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+					 sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+				   const u8 *raw_key, size_t raw_key_size,
+				   u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+				     u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+				    const u8 *lt_key, size_t lt_key_size,
+				    u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
 	.keyslot_program	= ufs_qcom_ice_keyslot_program,
 	.keyslot_evict		= ufs_qcom_ice_keyslot_evict,
+	.derive_sw_secret	= ufs_qcom_ice_derive_sw_secret,
+	.import_key		= ufs_qcom_ice_import_key,
+	.generate_key		= ufs_qcom_ice_generate_key,
+	.prepare_key		= ufs_qcom_ice_prepare_key,
 };
 
 #else
@@ -795,6 +853,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
 	return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
 }
 
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+	u32 equalizer_val;
+	int ret, i;
+
+	/* Determine the equalizer value based on the gear */
+	equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+	for (i = 0; i < tx_lanes; i++) {
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+				     equalizer_val);
+		if (ret)
+			dev_err(hba->dev, "%s: failed equalizer lane %d\n",
				__func__, i);
+	}
+}
+
 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
 				const struct ufs_pa_layer_attr *dev_max_params,
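Note: ufs_qcom_set_tx_hs_equalizer() above programs one attribute per TX lane through UIC_ARG_MIB_SEL(), which encodes a GenSelectorIndex alongside the attribute ID. The per-lane loop in isolation (a sketch; the "demo" name is invented and error handling is trimmed):

	static void demo_set_per_lane(struct ufs_hba *hba, u32 attr, u32 val,
				      u32 lanes)
	{
		for (u32 i = 0; i < lanes; i++)
			/* GenSelectorIndex i addresses lane i */
			if (ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(attr, i), val))
				dev_err(hba->dev, "lane %u: attr 0x%x failed\n",
					i, attr);
	}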
@@ -846,6 +921,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 					dev_req_params->gear_tx,
 					PA_INITIAL_ADAPT);
 		}
+
+		if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+			ufs_qcom_set_tx_hs_equalizer(hba,
+					dev_req_params->gear_tx, dev_req_params->lane_tx);
+
 		break;
 	case POST_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, false)) {
@@ -893,6 +973,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
 			    (pa_vs_config_reg1 | (1 << 12)));
 }
 
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+				  PA_TX_HSG1_SYNC_LENGTH_VAL);
+	if (err)
+		dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 {
 	int err = 0;
@@ -900,6 +990,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
 		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
 
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+		ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
 	return err;
 }
 
@@ -914,6 +1007,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
 	{ .wmanufacturerid = UFS_VENDOR_WDC,
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+	  .model = UFS_ANY_MODEL,
+	  .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+		   UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
 	{}
 };
 
@@ -1566,6 +1663,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 	return 0;
 }
 
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int i, j, nminor = 0, testbus_len = 0;
+	u32 *testbus __free(kfree) = NULL;
+	char *prefix;
+
+	testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!testbus)
+		return;
+
+	for (j = 0; j < TSTBUS_MAX; j++) {
+		nminor = testbus_info[j].nminor;
+		prefix = testbus_info[j].prefix;
+		host->testbus.select_major = j;
+		testbus_len = nminor * sizeof(u32);
+		for (i = 0; i < nminor; i++) {
+			host->testbus.select_minor = i;
+			ufs_qcom_testbus_config(host);
+			testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+		}
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+			       16, 4, testbus, testbus_len, false);
+	}
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+			      const char *prefix, enum ufshcd_res id)
+{
+	u32 *regs __free(kfree) = NULL;
+	size_t pos;
+
+	if (offset % 4 != 0 || len % 4 != 0)
+		return -EINVAL;
+
+	regs = kzalloc(len, GFP_ATOMIC);
+	if (!regs)
+		return -ENOMEM;
+
+	for (pos = 0; pos < len; pos += 4)
+		regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+	print_hex_dump(KERN_ERR, prefix,
+		       len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+		       16, 4, regs, len, false);
+
+	return 0;
+}
+
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+	struct dump_info {
+		size_t offset;
+		size_t len;
+		const char *prefix;
+		enum ufshcd_res id;
+	};
+
+	struct dump_info mcq_dumps[] = {
+		{0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+		{0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+		{0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+		{0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+		{0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+		{0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+		{0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+		{0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+		{0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+		{0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+		{0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+	};
+
+	for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+		ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+				   mcq_dumps[i].prefix, mcq_dumps[i].id);
+		cond_resched();
+	}
+}
+
 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 {
 	u32 reg;
@@ -1573,6 +1749,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 	host = ufshcd_get_variant(hba);
 
+	dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+	dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+	dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+	dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+	dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+		ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
 	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
 			 "HCI Vendor Specific Registers ");
 
@@ -1615,6 +1800,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
 	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+	if (hba->mcq_enabled) {
+		reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+		ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+	}
+
+	/* ensure below dumps occur only in task context due to blocking calls. */
+	if (in_task()) {
+		/* Dump MCQ Host Vendor Specific Registers */
+		if (hba->mcq_enabled)
+			ufs_qcom_dump_mcq_hci_regs(hba);
+
+		/* voluntarily yield the CPU as we are dumping too much data */
+		ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+		cond_resched();
+		ufs_qcom_dump_testbus(hba);
+	}
 }
 
 /**
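Note: the dump helpers above lean on scope-based cleanup from <linux/cleanup.h>: a pointer annotated __free(kfree) is freed automatically on every return path, which removes the usual goto-unwind boilerplate. The ESI rework below extends the same idea with a custom DEFINE_FREE() cleanup and retain_and_null_ptr() to keep the resource alive on success. A minimal sketch of the basic form (hypothetical function):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int demo_dump(size_t len)
	{
		u32 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* ... fill and print buf ... */
		return 0;	/* kfree(buf) runs automatically here too */
	}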
@@ -1806,25 +2008,38 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 	ufshcd_mcq_config_esi(hba, msg);
 }
 
+struct ufs_qcom_irq {
+	unsigned int irq;
+	unsigned int idx;
+	struct ufs_hba *hba;
+};
+
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-	struct msi_desc *desc = data;
-	struct device *dev = msi_desc_to_dev(desc);
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-	u32 id = desc->msi_index;
-	struct ufs_hw_queue *hwq = &hba->uhq[id];
+	struct ufs_qcom_irq *qi = data;
+	struct ufs_hba *hba = qi->hba;
+	struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
-	ufshcd_mcq_write_cqis(hba, 0x1, id);
+	ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
 	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
 }
 
+static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+{
+	for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+		devm_free_irq(q->hba->dev, q->irq, q->hba);
+
+	platform_device_msi_free_irqs_all(uqi->hba->dev);
+	devm_kfree(uqi->hba->dev, uqi);
+}
+
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct msi_desc *desc;
-	struct msi_desc *failed_desc = NULL;
 	int nr_irqs, ret;
 
 	if (host->esi_enabled)
@@ -1835,6 +2050,14 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 	 * 2. Poll queues do not need ESI.
 	 */
 	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+	struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+		devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+	if (!qi)
+		return -ENOMEM;
+	/* Preset so __free() has a pointer to hba in all error paths */
+	qi[0].hba = hba;
+
 	ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
 						      ufs_qcom_write_msi_msg);
 	if (ret) {
@@ -1842,41 +2065,31 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 		return ret;
 	}
 
-	msi_lock_descs(hba->dev);
-	msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-		ret = devm_request_irq(hba->dev, desc->irq,
-				       ufs_qcom_mcq_esi_handler,
-				       IRQF_SHARED, "qcom-mcq-esi", desc);
+	for (int idx = 0; idx < nr_irqs; idx++) {
+		qi[idx].irq = msi_get_virq(hba->dev, idx);
+		qi[idx].idx = idx;
+		qi[idx].hba = hba;
+
+		ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+				       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
 		if (ret) {
 			dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-				__func__, desc->irq, ret);
-			failed_desc = desc;
-			break;
+				__func__, qi[idx].irq, ret);
+			qi[idx].irq = 0;
+			return ret;
 		}
 	}
-	msi_unlock_descs(hba->dev);
 
-	if (ret) {
-		/* Rewind */
-		msi_lock_descs(hba->dev);
-		msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-			if (desc == failed_desc)
-				break;
-			devm_free_irq(hba->dev, desc->irq, hba);
-		}
-		msi_unlock_descs(hba->dev);
-		platform_device_msi_free_irqs_all(hba->dev);
-	} else {
-		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-		    host->hw_ver.step == 0)
-			ufshcd_rmwl(hba, ESI_VEC_MASK,
-				    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-				    REG_UFS_CFG3);
-		ufshcd_mcq_enable_esi(hba);
-		host->esi_enabled = true;
-	}
+	retain_and_null_ptr(qi);
 
-	return ret;
+	if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+	    host->hw_ver.step == 0) {
+		ufshcd_rmwl(hba, ESI_VEC_MASK,
+			    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+			    REG_UFS_CFG3);
+	}
+	ufshcd_mcq_enable_esi(hba);
+	host->esi_enabled = true;
+	return 0;
 }
 
 static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index d0e6ec9128e7..0a5cfc2dd4f7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,8 @@ enum {
 	 */
 	UFS_AH8_CFG				= 0xFC,
 
+	UFS_RD_REG_MCQ				= 0xD00,
+
 	REG_UFS_MEM_ICE_CONFIG			= 0x260C,
 	REG_UFS_MEM_ICE_NUM_CORE		= 0x2664,
 
@@ -75,6 +77,15 @@ enum {
 	UFS_UFS_DBG_RD_EDTL_RAM		= 0x1900,
 };
 
+/* QCOM UFS HC vendor specific Hibern8 count registers */
+enum {
+	REG_UFS_HW_H8_ENTER_CNT			= 0x2700,
+	REG_UFS_SW_H8_ENTER_CNT			= 0x2704,
+	REG_UFS_SW_AFTER_HW_H8_ENTER_CNT	= 0x2708,
+	REG_UFS_HW_H8_EXIT_CNT			= 0x270C,
+	REG_UFS_SW_H8_EXIT_CNT			= 0x2710,
+};
+
 enum {
 	UFS_MEM_CQIS_VS		= 0x8,
 };
@@ -122,8 +133,11 @@ enum {
 				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH	0x1552
 #define PA_VS_CONFIG_REG1	0x9000
 #define DME_VS_CORE_CLK_CTRL	0xD002
+#define TX_HS_EQUALIZER		0x0037
+
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
 #define CLK_1US_CYCLES_MASK_V4	GENMASK(27, 16)
 #define CLK_1US_CYCLES_MASK	GENMASK(7, 0)
@@ -141,6 +155,21 @@ enum {
 #define UNIPRO_CORE_CLK_FREQ_201_5_MHZ	202
 #define UNIPRO_CORE_CLK_FREQ_403_MHZ	403
 
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL	0x4A
+
+/*
+ * Some ufs device vendors need a different TSync length.
+ * Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH	BIT(16)
+
+/*
+ * Some ufs device vendors need a different Deemphasis setting.
+ * Enable this quirk to tune TX Deemphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING	BIT(17)
+
 /* ICE allocator type to share AES engines among TX stream and RX stream */
 #define ICE_ALLOCATOR_TYPE	2
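Note: the reworked ufs_qcom_config_esi() above drops the msi_for_each_desc() walk. It sizes a context array up front, resolves each vector's Linux IRQ number with msi_get_virq(), and hands every handler its own per-queue context instead of a shared msi_desc, so the handler no longer needs dev_get_drvdata() to find the hba. The request loop in isolation (a sketch; the caller is assumed to unwind partial setup, and the "demo" names are invented):

	#include <linux/interrupt.h>
	#include <linux/msi.h>

	struct demo_vec {
		unsigned int irq;
		unsigned int idx;
		void *priv;
	};

	static int demo_request_vectors(struct device *dev, struct demo_vec *v,
					int nr, irq_handler_t handler, void *priv)
	{
		for (int i = 0; i < nr; i++) {
			v[i].irq = msi_get_virq(dev, i);	/* Linux IRQ for vector i */
			v[i].idx = i;
			v[i].priv = priv;

			int ret = devm_request_irq(dev, v[i].irq, handler, 0,
						   "demo-esi", &v[i]);
			if (ret)
				return ret;	/* caller unwinds partial setup */
		}
		return 0;
	}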