author	Ulf Hansson <ulf.hansson@linaro.org>	2025-03-11 17:08:22 +0100
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2025-03-12 21:14:53 +0100
commit	0f42194c6b22d687fd53c8aea5413cf976366672 (patch)
tree	2591af76686dd10a12c86ce2c0c10fc6962ee089
parent	13b4f9e126cb55b65430bae7e704cea23c78620c (diff)
PM: s2idle: Drop redundant locks when entering s2idle
The calls to cpus_read_lock|unlock() protect us from CPUs getting hotplugged while entering suspend-to-idle. However, by the time s2idle_enter() is called we should be far beyond the point when CPUs may be hotplugged. Let's therefore simplify the code and drop the use of the lock.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/20250311160827.1129643-2-ulf.hansson@linaro.org
[ rjw: Rewrote the new comment ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
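For reference, here is a minimal sketch of the read-side pattern that cpus_read_lock()/cpus_read_unlock() normally provide; the walk_online_cpus() helper is hypothetical and only illustrates what the lock pair buys a caller that really can race with CPU hotplug:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/*
 * Hypothetical example, not part of this patch: keep the set of online
 * CPUs stable while visiting each of them. cpus_read_lock() blocks
 * concurrent CPU hotplug for the duration of the walk.
 */
static void walk_online_cpus(void (*fn)(unsigned int cpu))
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		fn(cpu);
	cpus_read_unlock();
}

The patch removes this kind of protection from s2idle_enter() on the grounds that, by the time it runs, the suspend flow is already past the point where CPUs can be taken offline or brought online, so the lock pair adds nothing.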
-rw-r--r--	kernel/power/suspend.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 09f8397bae15..1876abf1be15 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -91,6 +91,12 @@ static void s2idle_enter(void)
 {
 	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
 
+	/*
+	 * The correctness of the code below depends on the number of online
+	 * CPUs being stable, but CPUs cannot be taken offline or put online
+	 * while it is running.
+	 */
+
 	raw_spin_lock_irq(&s2idle_lock);
 	if (pm_wakeup_pending())
 		goto out;
@@ -98,8 +104,6 @@ static void s2idle_enter(void)
 	s2idle_state = S2IDLE_STATE_ENTER;
 	raw_spin_unlock_irq(&s2idle_lock);
 
-	cpus_read_lock();
-
 	/* Push all the CPUs into the idle loop. */
 	wake_up_all_idle_cpus();
 	/* Make the current CPU wait so it can enter the idle loop too. */
@@ -112,8 +116,6 @@ static void s2idle_enter(void)
 	 */
 	wake_up_all_idle_cpus();
 
-	cpus_read_unlock();
-
 	raw_spin_lock_irq(&s2idle_lock);
 out:
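
Pieced together from the hunks above, the entry path after this patch reads roughly as follows; code between and after the hunks is not shown in the diff, so this is a partial reconstruction rather than the full function:

static void s2idle_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);

	/*
	 * The correctness of the code below depends on the number of online
	 * CPUs being stable, but CPUs cannot be taken offline or put online
	 * while it is running.
	 */

	raw_spin_lock_irq(&s2idle_lock);
	if (pm_wakeup_pending())
		goto out;

	s2idle_state = S2IDLE_STATE_ENTER;
	raw_spin_unlock_irq(&s2idle_lock);

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();
	/* Make the current CPU wait so it can enter the idle loop too. */
	/* ... code between the second and third hunks elided ... */
	wake_up_all_idle_cpus();

	raw_spin_lock_irq(&s2idle_lock);
out:
	/* ... remainder not visible in the hunks above ... */
}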