author     Martin K. Petersen <martin.petersen@oracle.com>   2025-09-09 22:46:47 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>   2025-09-09 22:46:47 -0400
commit     7dc0d13d752986a50f6876806a100c97d3fbdcae (patch)
tree       7d28b73f5563d6bce80b2ae9581ee610fe84ef46
parent     508e754c693162af1fdb6bc00dec0b3237c17462 (diff)
parent     2936049277ea3bfd38d12583755556290b9ea494 (diff)
Merge patch series "ufs: host: mediatek: Power Management and stability enhancements"
Peter Wang <peter.wang@mediatek.com> says:

These patches collectively enhance the UFS host driver's reliability, power management efficiency, and error recovery mechanisms on MediaTek platforms. They address critical issues and introduce optimizations that improve system stability and performance.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--  drivers/ufs/core/ufshcd.c         3
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c   171
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h   1
-rw-r--r--  include/ufs/ufshcd.h              1
4 files changed, 145 insertions(+), 31 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ca6a0f8ccbea..e2157128e3bf 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6454,13 +6454,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
-static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
+EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery);
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
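
With the export in place, any UFS host variant driver can hand a stuck link to the core error handler instead of open-coding a reset. A minimal sketch of a hypothetical vendor callback follows; example_wait_for_link_up() is a placeholder, not a real helper, and only ufshcd_force_error_recovery() itself is taken from this series (the actual MediaTek usage appears in ufs-mediatek.c below):

	/*
	 * Hypothetical vendor-driver sketch. The wait helper stands in for
	 * whatever link polling the platform needs.
	 */
	static int example_vendor_exit_hibern8(struct ufs_hba *hba)
	{
		int ret;

		ret = example_wait_for_link_up(hba);	/* placeholder poll */
		if (ret) {
			/* Ask the core EH to reset and recover the host. */
			ufshcd_force_error_recovery(hba);
			return -EBUSY;	/* abort the ongoing PM transition */
		}

		return 0;
	}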
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 61c8fe135100..27d244808c31 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -416,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
}
}
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
@@ -452,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
break;
} while (time_checked < timeout);
- if (wait_idle && sm != VS_HCE_BASE)
+ if (wait_idle && sm != VS_HCE_BASE) {
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
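
Only the tail of the idle-poll loop is visible in the hunk above. An assumed reconstruction of its overall shape: timeout, time_checked, retry_ms, sm, val, and VS_HCE_BASE come from the visible context lines, while REG_UFS_PROBE and the exact exit conditions are guesses, not the verbatim body:

	/* Assumed reconstruction of the poll, not the verbatim function. */
	u64 timeout, time_checked;
	u32 val, sm;

	timeout = ktime_get_mono_fast_ns() + retry_ms * NSEC_PER_MSEC;
	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		sm = val & 0x1f;		/* host state machine field */
		if (sm == VS_HCE_BASE)		/* back to idle */
			break;
	} while (time_checked < timeout);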
@@ -1429,19 +1433,53 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
return ret;
}
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* disable auto-hibern8 */
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* wait for host to return to idle state when auto-hibern8 is off */
+ ret = ufs_mtk_wait_idle_state(hba, 5);
+ if (ret)
+ goto out;
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+ if (ret) {
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+ ufshcd_force_error_recovery(hba);
+
+ /* trigger error handler and break suspend */
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status stage,
const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
+ static u32 reg;
switch (stage) {
case PRE_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufs_mtk_auto_hibern8_disable(hba);
+ }
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
break;
default:
ret = -EINVAL;
@@ -1513,8 +1551,19 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
return ret;
}
+
static void ufs_mtk_post_link(struct ufs_hba *hba)
{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 tmp;
+
+ /* fix device PA_INIT no adapt */
+ if (host->ip_ver >= IP_VER_MT6899) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ tmp |= 0x100;
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+ }
+
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
}
@@ -1584,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
/* Check link state to make sure exit h8 success */
- ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_idle_state(hba, 5);
+ if (err) {
+ dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+ return err;
+ }
err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (err) {
dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1686,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
}
}
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
- int ret;
-
- /* disable auto-hibern8 */
- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* wait host return to idle state when auto-hibern8 off */
- ufs_mtk_wait_idle_state(hba, 5);
-
- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
- if (ret)
- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1709,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
- ufs_mtk_auto_hibern8_disable(hba);
+ return ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
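
The switch from a void call to returning ufs_mtk_auto_hibern8_disable() directly is what makes the -EBUSY above effective: the core invokes the variant suspend hook with PRE_CHANGE before powering anything down, and a nonzero return aborts the suspend attempt so that the error handler scheduled by ufshcd_force_error_recovery() can run first. Paraphrased core-side contract (simplified, not verbatim ufshcd code):

	/* Simplified, paraphrased core-side flow: */
	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
	if (ret)
		goto abort_suspend;	/* transition fails, EH work runs instead */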
@@ -1767,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
return 0;
+
fail:
- return ufshcd_link_recovery(hba);
+ /*
+ * Check if the platform (parent) device has resumed, and ensure that
+ * power, clock, and MTCMOS are all turned on.
+ */
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+ hba->dev->power.request,
+ hba->dev->power.runtime_status,
+ hba->dev->power.runtime_error);
+ }
+
+ return 0; /* Cannot return a failure; otherwise the I/O will hang. */
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -2139,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
return ret;
}
}
+ host->is_mcq_intr_enabled = true;
return 0;
}
@@ -2222,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
- struct device *dev = &pdev->dev;
- struct device_node *reset_node;
- struct platform_device *reset_pdev;
+ struct device *dev = &pdev->dev, *phy_dev = NULL;
+ struct device_node *reset_node, *phy_node = NULL;
+ struct platform_device *reset_pdev, *phy_pdev = NULL;
struct device_link *link;
+ struct ufs_hba *hba;
+ struct ufs_mtk_host *host;
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
@@ -2252,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
skip_reset:
+ /* find phy node */
+ phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+ if (phy_node) {
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (!phy_pdev)
+ goto skip_phy;
+ phy_dev = &phy_pdev->dev;
+
+ pm_runtime_set_active(phy_dev);
+ pm_runtime_enable(phy_dev);
+ pm_runtime_get_sync(phy_dev);
+
+ put_device(phy_dev);
+ dev_info(dev, "phys node found\n");
+ } else {
+ dev_notice(dev, "phys node not found\n");
+ }
+
+skip_phy:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
- if (err)
+ if (err) {
dev_err(dev, "probe failed %d\n", err);
+ goto out;
+ }
+
+ hba = platform_get_drvdata(pdev);
+ if (!hba)
+ goto out;
+
+ if (phy_node && phy_dev) {
+ host = ufshcd_get_variant(hba);
+ host->phy_dev = phy_dev;
+ }
+ /*
+ * Because the default power setting of VSx (the upper layer of
+ * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+ * entering LPM.
+ */
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ of_node_put(phy_node);
of_node_put(reset_node);
return err;
}
@@ -2283,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
ret = ufshcd_system_suspend(dev);
if (ret)
- return ret;
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
ufs_mtk_dev_vreg_set_lpm(hba, true);
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
- return 0;
+out:
+ return ret;
}
static int ufs_mtk_system_resume(struct device *dev)
{
+ int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
struct arm_smccc_res res;
- ufs_mtk_dev_vreg_set_lpm(hba, false);
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
- return ufshcd_system_resume(dev);
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ ret = ufshcd_system_resume(dev);
+
+ return ret;
}
#endif
@@ -2311,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
int ret = 0;
@@ -2323,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
+ if (host->phy_dev)
+ pm_runtime_put_sync(host->phy_dev);
+
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
+ if (host->phy_dev)
+ pm_runtime_get_sync(host->phy_dev);
+
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);
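
Taken together with the probe hunk above, these two hooks keep a runtime PM reference on the PHY exactly while the UFS host is active. A condensed view of the pairing, using only calls that appear in this series (error handling elided):

	/* probe: activate the PHY and take the initial runtime PM reference */
	pm_runtime_set_active(phy_dev);
	pm_runtime_enable(phy_dev);
	pm_runtime_get_sync(phy_dev);

	/* runtime suspend: drop the reference so the PHY may power down */
	pm_runtime_put_sync(host->phy_dev);

	/* runtime resume: retake the reference before touching the link */
	pm_runtime_get_sync(host->phy_dev);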
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index e46dc5fa209d..dfbf78bd8664 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -193,6 +193,7 @@ struct ufs_mtk_host {
bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
+ struct device *phy_dev;
};
/* MTK delay of autosuspend: 500 ms */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 30ff169878dc..f3351f71674c 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1507,5 +1507,6 @@ int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
const u16 *other_mask, u16 set, u16 clr);
+void ufshcd_force_error_recovery(struct ufs_hba *hba);
#endif /* End of Header */