Diffstat (limited to 'drivers/base/power')

 drivers/base/power/Makefile       |   1
 drivers/base/power/common.c       |   9
 drivers/base/power/generic_ops.c  |  85
 drivers/base/power/main.c         | 288
 drivers/base/power/runtime-test.c | 249
 drivers/base/power/runtime.c      | 203
 drivers/base/power/trace.c        |   4
 drivers/base/power/wakeup.c       |  26

 8 files changed, 625 insertions(+), 240 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@
 obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
 obj-$(CONFIG_HAVE_CLK) += clock_ops.o
 obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 781968a128ff..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
 /**
  * dev_pm_domain_attach - Attach a device to its PM domain.
  * @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
  *
  * The @dev may only be attached to a single PM domain. By iterating through
  * the available alternatives we try to find a valid PM domain for the device.
@@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
  * Returns 0 on successfully attached PM domain, or when it is found that the
  * device doesn't need a PM domain, else a negative error code.
  */
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
 {
     int ret;

     if (dev->pm_domain)
         return 0;

-    ret = acpi_dev_pm_attach(dev, power_on);
+    ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
     if (!ret)
         ret = genpd_dev_pm_attach(dev);

+    if (dev->pm_domain)
+        dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
     return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
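With the bool parameter replaced by a flags word, callers now choose the power-on and power-off behaviour at attach time. A minimal sketch of a caller, not part of this patch (the probe function is hypothetical; the two flags are the ones introduced above):

    #include <linux/pm_domain.h>

    static int foo_probe(struct device *dev)
    {
        int ret;

        /* Power the domain on for probe; power it off again on detach. */
        ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
                                        PD_FLAG_DETACH_POWER_OFF);
        if (ret)
            return ret;

        /* ... normal probe work ... */
        return 0;
    }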
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 6502720bb564..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
 #include <linux/pm_runtime.h>
 #include <linux/export.h>

+#define CALL_PM_OP(dev, op) \
+({ \
+    struct device *_dev = (dev); \
+    const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+    pm && pm->op ? pm->op(_dev) : 0; \
+})
+
 #ifdef CONFIG_PM
 /**
  * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
  */
 int pm_generic_runtime_suspend(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-    int ret;
-
-    ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
-    return ret;
+    return CALL_PM_OP(dev, runtime_suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
  */
 int pm_generic_runtime_resume(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-    int ret;
-
-    ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
-    return ret;
+    return CALL_PM_OP(dev, runtime_resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
 #endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
  */
 int pm_generic_suspend_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+    return CALL_PM_OP(dev, suspend_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
  */
 int pm_generic_suspend_late(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+    return CALL_PM_OP(dev, suspend_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
  */
 int pm_generic_suspend(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->suspend ? pm->suspend(dev) : 0;
+    return CALL_PM_OP(dev, suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);

@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
  */
 int pm_generic_freeze_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+    return CALL_PM_OP(dev, freeze_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
  */
 int pm_generic_freeze(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->freeze ? pm->freeze(dev) : 0;
+    return CALL_PM_OP(dev, freeze);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);

@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
  */
 int pm_generic_poweroff_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+    return CALL_PM_OP(dev, poweroff_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
  */
 int pm_generic_poweroff_late(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+    return CALL_PM_OP(dev, poweroff_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
  */
 int pm_generic_poweroff(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+    return CALL_PM_OP(dev, poweroff);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);

@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
  */
 int pm_generic_thaw_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+    return CALL_PM_OP(dev, thaw_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
  */
 int pm_generic_thaw(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->thaw ? pm->thaw(dev) : 0;
+    return CALL_PM_OP(dev, thaw);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);

@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+    return CALL_PM_OP(dev, resume_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume_early(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+    return CALL_PM_OP(dev, resume_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_early);

@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
  */
 int pm_generic_resume(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->resume ? pm->resume(dev) : 0;
+    return CALL_PM_OP(dev, resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);

@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+    return CALL_PM_OP(dev, restore_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore_early(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+    return CALL_PM_OP(dev, restore_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_early);

@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
  */
 int pm_generic_restore(struct device *dev)
 {
-    const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-    return pm && pm->restore ? pm->restore(dev) : 0;
+    return CALL_PM_OP(dev, restore);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
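CALL_PM_OP() relies on the GCC/Clang statement-expression extension, so the macro can declare locals and still yield a value. For illustration, after this change the body of pm_generic_suspend() is equivalent to:

    int pm_generic_suspend(struct device *dev)
    {
        return ({
            struct device *_dev = (dev);
            const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL;
            pm && pm->suspend ? pm->suspend(_dev) : 0;
        });
    }

which is exactly the open-coded pattern the macro replaces in each of the helpers above.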
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 19fd55b8ac77..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,16 +34,13 @@
 #include <linux/cpufreq.h>
 #include <linux/devfreq.h>
 #include <linux/timer.h>
+#include <linux/nmi.h>

 #include "../base.h"
 #include "power.h"

 typedef int (*pm_callback_t)(struct device *);

-#define list_for_each_entry_rcu_locked(pos, head, member) \
-    list_for_each_entry_rcu(pos, head, member, \
-            device_links_read_lock_held())
-
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -66,6 +63,20 @@ static pm_message_t pm_transition;
 static DEFINE_MUTEX(async_wip_mtx);
 static int async_error;

+/**
+ * pm_hibernate_is_recovering - if recovering from hibernate due to error.
+ *
+ * Used to query if dev_pm_ops.thaw() is called for normal hibernation case or
+ * recovering from some error.
+ *
+ * Return: true for error case, false for normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+    return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
 static const char *pm_verb(int event)
 {
     switch (event) {
@@ -85,6 +96,8 @@ static const char *pm_verb(int event)
         return "restore";
     case PM_EVENT_RECOVER:
         return "recover";
+    case PM_EVENT_POWEROFF:
+        return "poweroff";
     default:
         return "(unknown PM event)";
     }
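The new pm_hibernate_is_recovering() helper lets a driver's .thaw() callback distinguish the normal thaw that follows writing the hibernation image from the thaw that runs while recovering from a failed hibernation. A sketch of a caller, not part of this patch; the foo_* helpers are hypothetical:

    static int foo_thaw(struct device *dev)
    {
        if (pm_hibernate_is_recovering())
            /* Hibernation failed: bring the device fully back to life. */
            return foo_full_reinit(dev);

        /* Normal thaw after the image has been saved: minimal setup. */
        return foo_light_reinit(dev);
    }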
@@ -267,8 +280,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
      * callbacks freeing the link objects for the links in the list we're
      * walking.
      */
-    list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
-        if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+    dev_for_each_link_to_supplier(link, dev)
+        if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+            !device_link_flag_is_sync_state_only(link->flags))
             dpm_wait(link->supplier, async);

     device_links_read_unlock(idx);
@@ -324,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
      * continue instead of trying to continue in parallel with its
      * unregistration).
      */
-    list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
-        if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+    dev_for_each_link_to_consumer(link, dev)
+        if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+            !device_link_flag_is_sync_state_only(link->flags))
             dpm_wait(link->consumer, async);

     device_links_read_unlock(idx);
@@ -355,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
     case PM_EVENT_FREEZE:
     case PM_EVENT_QUIESCE:
         return ops->freeze;
+    case PM_EVENT_POWEROFF:
     case PM_EVENT_HIBERNATE:
         return ops->poweroff;
     case PM_EVENT_THAW:
@@ -389,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
     case PM_EVENT_FREEZE:
     case PM_EVENT_QUIESCE:
         return ops->freeze_late;
+    case PM_EVENT_POWEROFF:
     case PM_EVENT_HIBERNATE:
         return ops->poweroff_late;
     case PM_EVENT_THAW:
@@ -423,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
     case PM_EVENT_FREEZE:
     case PM_EVENT_QUIESCE:
         return ops->freeze_noirq;
+    case PM_EVENT_POWEROFF:
     case PM_EVENT_HIBERNATE:
         return ops->poweroff_noirq;
     case PM_EVENT_THAW:
@@ -503,6 +521,11 @@ struct dpm_watchdog {
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
     struct dpm_watchdog wd

+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+    "Backtrace all CPUs on DPM watchdog timeout");
+
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
  * @t: The timer that PM watchdog depends on.
@@ -513,13 +536,17 @@ struct dpm_watchdog {
  */
 static void dpm_watchdog_handler(struct timer_list *t)
 {
-    struct dpm_watchdog *wd = from_timer(wd, t, timer);
+    struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
     struct timer_list *timer = &wd->timer;
     unsigned int time_left;

     if (wd->fatal) {
+        unsigned int this_cpu = smp_processor_id();
+
         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
         show_stack(wd->tsk, NULL, KERN_EMERG);
+        if (dpm_watchdog_all_cpu_backtrace)
+            trigger_allbutcpu_cpu_backtrace(this_cpu);
         panic("%s %s: unrecoverable failure\n",
               dev_driver_string(wd->dev), dev_name(wd->dev));
     }
@@ -638,16 +665,36 @@ static int dpm_async_with_cleanup(struct device *dev, void *fn)
 static void dpm_async_resume_children(struct device *dev, async_func_t func)
 {
     /*
+     * Prevent racing with dpm_clear_async_state() during initial list
+     * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+     * dpm_resume().
+     */
+    guard(mutex)(&dpm_list_mtx);
+
+    /*
      * Start processing "async" children of the device unless it's been
      * started already for them.
-     *
-     * This could have been done for the device's "async" consumers too, but
-     * they either need to wait for their parents or the processing has
-     * already started for them after their parents were processed.
      */
     device_for_each_child(dev, func, dpm_async_with_cleanup);
 }

+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+    struct device_link *link;
+    int idx;
+
+    dpm_async_resume_children(dev, func);
+
+    idx = device_links_read_lock();
+
+    /* Start processing the device's "async" consumers. */
+    dev_for_each_link_to_consumer(link, dev)
+        if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+            dpm_async_with_cleanup(link->consumer, func);
+
+    device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
     reinit_completion(&dev->power.completion);
@@ -656,7 +703,14 @@ static void dpm_clear_async_state(struct device *dev)

 static bool dpm_root_device(struct device *dev)
 {
-    return !dev->parent;
+    lockdep_assert_held(&dpm_list_mtx);
+
+    /*
+     * Since this function is required to run under dpm_list_mtx, the
+     * list_empty() below will only return true if the device's list of
+     * consumers is actually empty before calling it.
+     */
+    return !dev->parent && list_empty(&dev->links.suppliers);
 }

 static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -683,8 +737,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
     if (dev->power.syscore || dev->power.direct_complete)
         goto Out;

-    if (!dev->power.is_noirq_suspended)
+    if (!dev->power.is_noirq_suspended) {
+        /*
+         * This means that system suspend has been aborted in the noirq
+         * phase before invoking the noirq suspend callback for the
+         * device, so if device_suspend_late() has left it in suspend,
+         * device_resume_early() should leave it in suspend either in
+         * case the early resume of it depends on the noirq resume that
+         * has not run.
+         */
+        if (dev_pm_skip_suspend(dev))
+            dev->power.must_resume = false;
+
         goto Out;
+    }

     if (!dpm_wait_for_superior(dev, async))
         goto Out;
@@ -740,12 +806,12 @@ Out:
     TRACE_RESUME(error);

     if (error) {
-        async_error = error;
+        WRITE_ONCE(async_error, error);
         dpm_save_failed_dev(dev_name(dev));
         pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
     }

-    dpm_async_resume_children(dev, async_resume_noirq);
+    dpm_async_resume_subordinate(dev, async_resume_noirq);
 }

 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -797,7 +863,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
     mutex_unlock(&dpm_list_mtx);
     async_synchronize_full();
     dpm_show_time(starttime, state, 0, "noirq");
-    if (async_error)
+    if (READ_ONCE(async_error))
         dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

     trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
" async early" : " early", error); } - dpm_async_resume_children(dev, async_resume_early); + dpm_async_resume_subordinate(dev, async_resume_early); } static void async_resume_early(void *data, async_cookie_t cookie) @@ -944,7 +1013,7 @@ void dpm_resume_early(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "early"); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME_EARLY); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); @@ -985,6 +1054,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (!dev->power.is_suspended) goto Complete; + dev->power.is_suspended = false; + if (dev->power.direct_complete) { /* * Allow new children to be added under the device after this @@ -1047,7 +1118,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) End: error = dpm_run_callback(callback, dev, state, info); - dev->power.is_suspended = false; device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1058,12 +1128,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async" : "", error); } - dpm_async_resume_children(dev, async_resume); + dpm_async_resume_subordinate(dev, async_resume); } static void async_resume(void *data, async_cookie_t cookie) @@ -1087,7 +1157,6 @@ void dpm_resume(pm_message_t state) ktime_t starttime = ktime_get(); trace_suspend_resume(TPS("dpm_resume"), state.event, true); - might_sleep(); pm_transition = state; async_error = 0; @@ -1123,7 +1192,7 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, NULL); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME); cpufreq_resume(); @@ -1190,7 +1259,6 @@ void dpm_complete(pm_message_t state) struct list_head list; trace_suspend_resume(TPS("dpm_complete"), state.event, true); - might_sleep(); INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); @@ -1229,6 +1297,7 @@ void dpm_complete(pm_message_t state) void dpm_resume_end(pm_message_t state) { dpm_resume(state); + pm_restore_gfp_mask(); dpm_complete(state); } EXPORT_SYMBOL_GPL(dpm_resume_end); @@ -1249,10 +1318,15 @@ static bool dpm_leaf_device(struct device *dev) return false; } - return true; + /* + * Since this function is required to run under dpm_list_mtx, the + * list_empty() below will only return true if the device's list of + * consumers is actually empty before calling it. + */ + return list_empty(&dev->links.consumers); } -static void dpm_async_suspend_parent(struct device *dev, async_func_t func) +static bool dpm_async_suspend_parent(struct device *dev, async_func_t func) { guard(mutex)(&dpm_list_mtx); @@ -1264,11 +1338,47 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func) * deleted before it. */ if (!device_pm_initialized(dev)) - return; + return false; /* Start processing the device's parent if it is "async". */ if (dev->parent) dpm_async_with_cleanup(dev->parent, func); + + return true; +} + +static void dpm_async_suspend_superior(struct device *dev, async_func_t func) +{ + struct device_link *link; + int idx; + + if (!dpm_async_suspend_parent(dev, func)) + return; + + idx = device_links_read_lock(); + + /* Start processing the device's "async" suppliers. 
@@ -1264,11 +1338,47 @@ static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
      * deleted before it.
      */
     if (!device_pm_initialized(dev))
-        return;
+        return false;

     /* Start processing the device's parent if it is "async". */
     if (dev->parent)
         dpm_async_with_cleanup(dev->parent, func);
+
+    return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+    struct device_link *link;
+    int idx;
+
+    if (!dpm_async_suspend_parent(dev, func))
+        return;
+
+    idx = device_links_read_lock();
+
+    /* Start processing the device's "async" suppliers. */
+    dev_for_each_link_to_supplier(link, dev)
+        if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+            dpm_async_with_cleanup(link->supplier, func);
+
+    device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+    struct device *dev;
+
+    guard(mutex)(&async_wip_mtx);
+
+    list_for_each_entry_reverse(dev, device_list, power.entry) {
+        /*
+         * In case the device is being waited for and async processing
+         * has not started for it yet, let the waiters make progress.
+         */
+        if (!dev->power.work_in_progress)
+            complete_all(&dev->power.completion);
+    }
 }

 /**
@@ -1302,7 +1412,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
     idx = device_links_read_lock();

-    list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+    dev_for_each_link_to_supplier(link, dev)
         link->supplier->power.must_resume = true;

     device_links_read_unlock(idx);
@@ -1319,7 +1429,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie);
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
     pm_callback_t callback = NULL;
     const char *info = NULL;
@@ -1330,7 +1440,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
     dpm_wait_for_subordinate(dev, async);

-    if (async_error)
+    if (READ_ONCE(async_error))
         goto Complete;

     if (dev->power.syscore || dev->power.direct_complete)
@@ -1363,7 +1473,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
 Run:
     error = dpm_run_callback(callback, dev, state, info);
     if (error) {
-        async_error = error;
+        WRITE_ONCE(async_error, error);
         dpm_save_failed_dev(dev_name(dev));
         pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
         goto Complete;
@@ -1389,12 +1499,10 @@ Complete:
     complete_all(&dev->power.completion);
     TRACE_SUSPEND(error);

-    if (error || async_error)
-        return error;
-
-    dpm_async_suspend_parent(dev, async_suspend_noirq);
+    if (error || READ_ONCE(async_error))
+        return;

-    return 0;
+    dpm_async_suspend_superior(dev, async_suspend_noirq);
 }

 static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1409,7 +1517,7 @@
 static int dpm_noirq_suspend_devices(pm_message_t state)
 {
     ktime_t starttime = ktime_get();
     struct device *dev;
-    int error = 0;
+    int error;

     trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1440,18 +1548,19 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);

-        error = device_suspend_noirq(dev, state, false);
+        device_suspend_noirq(dev, state, false);

         put_device(dev);

         mutex_lock(&dpm_list_mtx);

-        if (error || async_error) {
+        if (READ_ONCE(async_error)) {
+            dpm_async_suspend_complete_all(&dpm_late_early_list);
             /*
              * Move all devices to the target list to resume them
              * properly.
              */
-            list_splice(&dpm_late_early_list, &dpm_noirq_list);
+            list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
             break;
         }
     }
@@ -1459,9 +1568,8 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
     mutex_unlock(&dpm_list_mtx);

     async_synchronize_full();
-    if (!error)
-        error = async_error;

+    error = READ_ONCE(async_error);
     if (error)
         dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1516,7 +1624,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie);
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
     pm_callback_t callback = NULL;
     const char *info = NULL;
@@ -1525,25 +1633,28 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
     TRACE_DEVICE(dev);
     TRACE_SUSPEND(0);

-    /*
-     * Disable runtime PM for the device without checking if there is a
-     * pending resume request for it.
-     */
-    __pm_runtime_disable(dev, false);
-
     dpm_wait_for_subordinate(dev, async);

-    if (async_error)
+    if (READ_ONCE(async_error))
         goto Complete;

     if (pm_wakeup_pending()) {
-        async_error = -EBUSY;
+        WRITE_ONCE(async_error, -EBUSY);
         goto Complete;
     }

-    if (dev->power.syscore || dev->power.direct_complete)
+    if (dev->power.direct_complete)
         goto Complete;

+    /*
+     * Disable runtime PM for the device without checking if there is a
+     * pending resume request for it.
+     */
+    __pm_runtime_disable(dev, false);
+
+    if (dev->power.syscore)
+        goto Skip;
+
     if (dev->pm_domain) {
         info = "late power domain ";
         callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1571,9 +1682,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
 Run:
     error = dpm_run_callback(callback, dev, state, info);
     if (error) {
-        async_error = error;
+        WRITE_ONCE(async_error, error);
         dpm_save_failed_dev(dev_name(dev));
         pm_dev_err(dev, state, async ? " async late" : " late", error);
+        pm_runtime_enable(dev);
         goto Complete;
     }
     dpm_propagate_wakeup_to_parent(dev);
@@ -1585,12 +1697,10 @@ Complete:
     TRACE_SUSPEND(error);
     complete_all(&dev->power.completion);

-    if (error || async_error)
-        return error;
-
-    dpm_async_suspend_parent(dev, async_suspend_late);
+    if (error || READ_ONCE(async_error))
+        return;

-    return 0;
+    dpm_async_suspend_superior(dev, async_suspend_late);
 }

 static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1609,7 +1719,7 @@ int dpm_suspend_late(pm_message_t state)
 {
     ktime_t starttime = ktime_get();
     struct device *dev;
-    int error = 0;
+    int error;

     trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1642,18 +1752,19 @@ int dpm_suspend_late(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);

-        error = device_suspend_late(dev, state, false);
+        device_suspend_late(dev, state, false);

         put_device(dev);

         mutex_lock(&dpm_list_mtx);

-        if (error || async_error) {
+        if (READ_ONCE(async_error)) {
+            dpm_async_suspend_complete_all(&dpm_suspended_list);
             /*
              * Move all devices to the target list to resume them
              * properly.
              */
-            list_splice(&dpm_suspended_list, &dpm_late_early_list);
+            list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
             break;
         }
     }
@@ -1661,9 +1772,8 @@ int dpm_suspend_late(pm_message_t state)
     mutex_unlock(&dpm_list_mtx);

     async_synchronize_full();
-    if (!error)
-        error = async_error;

+    error = READ_ONCE(async_error);
     if (error) {
         dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
         dpm_resume_early(resume_event(state));
@@ -1735,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
     idx = device_links_read_lock();

-    list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+    dev_for_each_link_to_supplier(link, dev) {
         spin_lock_irq(&link->supplier->power.lock);
         link->supplier->power.direct_complete = false;
         spin_unlock_irq(&link->supplier->power.lock);
@@ -1752,7 +1862,7 @@ static void async_suspend(void *data, async_cookie_t cookie);
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being suspended asynchronously.
  */
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
 {
     pm_callback_t callback = NULL;
     const char *info = NULL;
@@ -1764,7 +1874,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
     dpm_wait_for_subordinate(dev, async);

-    if (async_error) {
+    if (READ_ONCE(async_error)) {
         dev->power.direct_complete = false;
         goto Complete;
     }
@@ -1784,7 +1894,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
     if (pm_wakeup_pending()) {
         dev->power.direct_complete = false;
-        async_error = -EBUSY;
+        WRITE_ONCE(async_error, -EBUSY);
         goto Complete;
     }
@@ -1868,7 +1978,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)

  Complete:
     if (error) {
-        async_error = error;
+        WRITE_ONCE(async_error, error);
         dpm_save_failed_dev(dev_name(dev));
         pm_dev_err(dev, state, async ? " async" : "", error);
     }
@@ -1876,12 +1986,10 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
     complete_all(&dev->power.completion);
     TRACE_SUSPEND(error);

-    if (error || async_error)
-        return error;
-
-    dpm_async_suspend_parent(dev, async_suspend);
+    if (error || READ_ONCE(async_error))
+        return;

-    return 0;
+    dpm_async_suspend_superior(dev, async_suspend);
 }

 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1900,7 +2008,7 @@ int dpm_suspend(pm_message_t state)
 {
     ktime_t starttime = ktime_get();
     struct device *dev;
-    int error = 0;
+    int error;

     trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
     might_sleep();
@@ -1935,18 +2043,19 @@ int dpm_suspend(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);

-        error = device_suspend(dev, state, false);
+        device_suspend(dev, state, false);

         put_device(dev);

         mutex_lock(&dpm_list_mtx);

-        if (error || async_error) {
+        if (READ_ONCE(async_error)) {
+            dpm_async_suspend_complete_all(&dpm_prepared_list);
             /*
              * Move all devices to the target list to resume them
              * properly.
              */
-            list_splice(&dpm_prepared_list, &dpm_suspended_list);
+            list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
             break;
         }
     }
@@ -1954,9 +2063,8 @@ int dpm_suspend(pm_message_t state)
     mutex_unlock(&dpm_list_mtx);

     async_synchronize_full();
-    if (!error)
-        error = async_error;

+    error = READ_ONCE(async_error);
     if (error)
         dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -1989,8 +2097,8 @@ static bool device_prepare_smart_suspend(struct device *dev)
     idx = device_links_read_lock();

-    list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
-        if (!(link->flags & DL_FLAG_PM_RUNTIME))
+    dev_for_each_link_to_supplier(link, dev) {
+        if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
             continue;

         if (!dev_pm_smart_suspend(link->supplier) &&
@@ -2040,6 +2148,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
     device_lock(dev);

     dev->power.wakeup_path = false;
+    dev->power.out_band_wakeup = false;

     if (dev->power.no_pm_callbacks)
         goto unlock;
@@ -2101,7 +2210,6 @@ int dpm_prepare(pm_message_t state)
     int error = 0;

     trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
-    might_sleep();

     /*
      * Give a chance for the known devices to complete their probes, before
@@ -2168,8 +2276,10 @@ int dpm_suspend_start(pm_message_t state)
     error = dpm_prepare(state);
     if (error)
         dpm_save_failed_step(SUSPEND_PREPARE);
-    else
+    else {
+        pm_restrict_gfp_mask();
         error = dpm_suspend(state);
+    }

     dpm_show_time(starttime, state, error, "start");
     return error;
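Several hunks above switch the link walks over to dev_for_each_link_to_supplier()/dev_for_each_link_to_consumer() and test link flags with device_link_test(link, DL_FLAG_PM_RUNTIME). Such runtime-PM-aware links are created by consumer devices; a sketch of how one comes into existence, not part of this patch (the two device pointers are hypothetical):

    /* Let runtime PM of the consumer keep its supplier active. */
    struct device_link *link;

    link = device_link_add(consumer_dev, supplier_dev,
                           DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
    if (!link)
        return -ENODEV;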
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..1535ad2b0264
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    pm_runtime_enable(dev);
+
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    pm_runtime_enable(dev);
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+    pm_runtime_get_noresume(dev);
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+    KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+    pm_runtime_get_noresume(dev);
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+    pm_runtime_get_noresume(dev);
+    pm_runtime_put_autosuspend(dev);
+
+    /* Grab 2 refcounts */
+    pm_runtime_get_noresume(dev);
+    pm_runtime_get_noresume(dev);
+    /* The first put() sees usage_count 1 */
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+    /* The second put() sees usage_count 0 but tells us "already suspended". */
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+    /* Should have remained suspended the whole time. */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    pm_runtime_enable(dev);
+
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    pm_runtime_put_noidle(dev);
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    /* Never called pm_runtime_enable() */
+    KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+    /* "disabled" is treated as "active" */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+    /*
+     * Note: these "fail", but they still acquire/release refcounts, so
+     * keep them balanced.
+     */
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+    pm_runtime_put(dev);
+
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+    pm_runtime_put_autosuspend(dev);
+
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+    /* Still disabled */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+    KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    pm_runtime_enable(dev);
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+    /* Fake a .runtime_resume() error */
+    dev->power.runtime_error = -EIO;
+
+    /*
+     * Note: these "fail", but they still acquire/release refcounts, so
+     * keep them balanced.
+     */
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+    pm_runtime_put(dev);
+
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+    pm_runtime_put_autosuspend(dev);
+
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+    /* Error is still pending */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+    KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+    /* Clear error */
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+    KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+    /* Still suspended */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+    pm_runtime_barrier(dev);
+    pm_runtime_put(dev);
+    pm_runtime_suspend(dev); /* flush the put(), to suspend */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+    pm_runtime_put_autosuspend(dev);
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+    /*
+     * The following should all return -EAGAIN (usage is non-zero) or 1
+     * (already resumed).
+     */
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+    KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+    KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+    KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+    /* Suspended again */
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+    struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+    KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+    pm_runtime_enable(dev);
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+    /* Nothing to flush. We stay active. */
+    pm_runtime_barrier(dev);
+    KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+    /* Ask for idle? Now we suspend. */
+    KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+    KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+    KUNIT_CASE(pm_runtime_depth_test),
+    KUNIT_CASE(pm_runtime_already_suspended_test),
+    KUNIT_CASE(pm_runtime_idle_test),
+    KUNIT_CASE(pm_runtime_disabled_test),
+    KUNIT_CASE(pm_runtime_error_test),
+    KUNIT_CASE(pm_runtime_probe_active_test),
+    {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+    .name = "pm_runtime_test_cases",
+    .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
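The new suite can be exercised with the standard KUnit tooling; a sketch, assuming the CONFIG_PM_RUNTIME_KUNIT_TEST symbol wired up in the Makefile hunk above is selectable in your tree:

    ./tools/testing/kunit/kunit.py run 'pm_runtime_test_cases' \
        --kconfig_add CONFIG_PM=y \
        --kconfig_add CONFIG_PM_RUNTIME_KUNIT_TEST=y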
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c55a7c70bc1a..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@

 typedef int (*pm_callback_t)(struct device *);

+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+    return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+                                               size_t cb_offset)
+{
+    if (dev->driver && dev->driver->pm)
+        return get_callback_ptr(dev->driver->pm, cb_offset);
+
+    return NULL;
+}
+
 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 {
-    pm_callback_t cb;
     const struct dev_pm_ops *ops;
+    pm_callback_t cb = NULL;

     if (dev->pm_domain)
         ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
         ops = NULL;

     if (ops)
-        cb = *(pm_callback_t *)((void *)ops + cb_offset);
-    else
-        cb = NULL;
+        cb = get_callback_ptr(ops, cb_offset);

-    if (!cb && dev->driver && dev->driver->pm)
-        cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+    if (!cb)
+        cb = __rpm_get_driver_callback(dev, cb_offset);

     return cb;
 }
@@ -78,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
     /*
      * Because ktime_get_mono_fast_ns() is not monotonic during
      * timekeeping updates, ensure that 'now' is after the last saved
-     * timesptamp.
+     * timestamp.
      */
     if (now < last)
         return;
@@ -205,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
      * resume/suspend callback of any one of its ancestors(or the
      * block device itself), the deadlock may be triggered inside the
      * memory allocation since it might not complete until the block
-     * device becomes active and the involed page I/O finishes. The
+     * device becomes active and the involved page I/O finishes. The
      * situation is pointed out first by Alan Stern. Network device
      * are involved in iSCSI kind of situation.
      *
@@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
                 device_links_read_lock_held()) {
         int retval;

-        if (!(link->flags & DL_FLAG_PM_RUNTIME))
+        if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
             continue;

         retval = pm_runtime_get_sync(link->supplier);
@@ -486,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
     if (retval < 0)
         ;    /* Conditions are wrong. */

+    else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+        ;    /* put() is allowed in RPM_SUSPENDED */
+
     /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     else if (dev->power.runtime_status != RPM_ACTIVE)
         retval = -EAGAIN;
@@ -784,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
         if (dev->power.runtime_status == RPM_ACTIVE &&
             dev->power.last_status == RPM_ACTIVE)
             retval = 1;
+        else if (rpmflags & RPM_TRANSPARENT)
+            goto out;
         else
             retval = -EACCES;
     }
@@ -1191,10 +1208,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
  *
  * Return -EINVAL if runtime PM is disabled for @dev.
  *
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
  *
  * If @ign_usage_count is %true, this function can be used to prevent suspending
  * the device when its runtime PM status is %RPM_ACTIVE.
@@ -1216,7 +1235,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
         retval = -EINVAL;
     } else if (dev->power.runtime_status != RPM_ACTIVE) {
         retval = 0;
-    } else if (ign_usage_count) {
+    } else if (ign_usage_count || (!dev->power.ignore_children &&
+               atomic_read(&dev->power.child_count) > 0)) {
         retval = 1;
         atomic_inc(&dev->power.usage_count);
     } else {
@@ -1249,10 +1269,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
  * @dev: Target device.
  *
  * Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
+ * ignoring children and its active child count is nonzero. 1 is returned in
+ * this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0, or it is ignoring children, or its active child count is 0),
+ * 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
  */
 int pm_runtime_get_if_in_use(struct device *dev)
 {
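pm_runtime_get_if_in_use() is the conditional-get primitive typically used in contexts that must not resume the device themselves. A sketch of the common pattern, not part of this patch (the handler and register access are hypothetical):

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
        struct device *dev = data;

        /* Only touch the hardware if it is known to be powered up. */
        if (pm_runtime_get_if_in_use(dev) <= 0)
            return IRQ_NONE;

        /* ... read and acknowledge hardware status ... */

        pm_runtime_put(dev);
        return IRQ_HANDLED;
    }

With this change, the helper also succeeds for a device whose usage counter is zero but which must stay active because it has active children it is not ignoring.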
@@ -1441,30 +1467,20 @@ static void __pm_runtime_barrier(struct device *dev)
  * Next, make sure that all pending requests for the device have been flushed
  * from pm_wq and wait for all runtime PM operations involving the device in
  * progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
  */
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
 {
-    int retval = 0;
-
     pm_runtime_get_noresume(dev);
     spin_lock_irq(&dev->power.lock);

     if (dev->power.request_pending
-        && dev->power.request == RPM_REQ_RESUME) {
+        && dev->power.request == RPM_REQ_RESUME)
         rpm_resume(dev, 0);
-        retval = 1;
-    }

     __pm_runtime_barrier(dev);
     spin_unlock_irq(&dev->power.lock);

     pm_runtime_put_noidle(dev);
-
-    return retval;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
@@ -1638,9 +1654,12 @@ EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
  * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
  *
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
  */
 void pm_runtime_forbid(struct device *dev)
 {
@@ -1661,7 +1680,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
  * pm_runtime_allow - Unblock runtime PM of a device.
  * @dev: Device to handle.
  *
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
  */
 void pm_runtime_allow(struct device *dev)
 {
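The rewritten kernel-doc spells out the sysfs coupling: writing "on" to /sys/devices/.../power/control calls pm_runtime_forbid() and "auto" calls pm_runtime_allow(). A sketch of a driver that wants to start out blocked and let user space opt in, not part of this patch (the probe function is hypothetical):

    static int foo_probe(struct device *dev)
    {
        pm_runtime_enable(dev);

        /*
         * Keep the device powered until user space writes "auto" to
         * its power/control attribute.
         */
        pm_runtime_forbid(dev);

        return 0;
    }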
@@ -1827,7 +1852,7 @@ void pm_runtime_init(struct device *dev)
     dev->power.request_pending = false;
     dev->power.request = RPM_REQ_NONE;
     dev->power.deferred_resume = false;
-    dev->power.needs_force_resume = 0;
+    dev->power.needs_force_resume = false;
     INIT_WORK(&dev->power.work, pm_runtime_work);

     dev->power.timer_expires = 0;
@@ -1854,6 +1879,11 @@ void pm_runtime_reinit(struct device *dev)
             pm_runtime_put(dev->parent);
         }
     }
+    /*
+     * Clear power.needs_force_resume in case it has been set by
+     * pm_runtime_force_suspend() invoked from a driver remove callback.
+     */
+    dev->power.needs_force_resume = false;
 }
@@ -1877,9 +1907,8 @@ void pm_runtime_get_suppliers(struct device *dev)
     idx = device_links_read_lock();

-    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-                device_links_read_lock_held())
-        if (link->flags & DL_FLAG_PM_RUNTIME) {
+    dev_for_each_link_to_supplier(link, dev)
+        if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
             link->supplier_preactivated = true;
             pm_runtime_get_sync(link->supplier);
         }
@@ -1933,7 +1962,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
  */
 void pm_runtime_drop_link(struct device_link *link)
 {
-    if (!(link->flags & DL_FLAG_PM_RUNTIME))
+    if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
         return;

     pm_runtime_drop_link_count(link->consumer);
@@ -1941,13 +1970,23 @@ void pm_runtime_drop_link(struct device_link *link)
     pm_request_idle(link->supplier);
 }

-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
 {
-    return atomic_read(&dev->power.usage_count) <= 1 &&
-        (atomic_read(&dev->power.child_count) == 0 ||
-         dev->power.ignore_children);
+    /*
+     * Setting power.strict_midlayer means that the middle layer
+     * code does not want its runtime PM callbacks to be invoked via
+     * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+     * return a direct pointer to the driver callback in that case.
+     */
+    if (dev_pm_strict_midlayer_is_set(dev))
+        return __rpm_get_driver_callback(dev, cb_offset);

+    return __rpm_get_callback(dev, cb_offset);
 }

+#define GET_CALLBACK(dev, callback) \
+    get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
 /**
  * pm_runtime_force_suspend - Force a device into suspend state if needed.
  * @dev: Device to suspend.
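GET_CALLBACK() is plain offsetof() arithmetic over struct dev_pm_ops, matching the get_callback_ptr() helper introduced at the top of this file. For example:

    /* GET_CALLBACK(dev, runtime_resume) expands to: */
    get_callback(dev, offsetof(struct dev_pm_ops, runtime_resume));

so get_callback() can fetch any member of whichever dev_pm_ops structure it selects, either the middle layer's or, when the strict-midlayer flag is set, the driver's own.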
@@ -1964,10 +2003,6 @@ bool pm_runtime_need_not_resume(struct device *dev)
  * sure the device is put into low power state and it should only be used during
  * system-wide PM transitions to sleep states. It assumes that the analogous
  * pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
  */
 int pm_runtime_force_suspend(struct device *dev)
 {
@@ -1975,10 +2010,10 @@ int pm_runtime_force_suspend(struct device *dev)
     int ret;

     pm_runtime_disable(dev);
-    if (pm_runtime_status_suspended(dev))
+    if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
         return 0;

-    callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+    callback = GET_CALLBACK(dev, runtime_suspend);

     dev_pm_enable_wake_irq_check(dev, true);
     ret = callback ? callback(dev) : 0;
@@ -1990,15 +2025,16 @@ int pm_runtime_force_suspend(struct device *dev)
     /*
      * If the device can stay in suspend after the system-wide transition
      * to the working state that will follow, drop the children counter of
-     * its parent, but set its status to RPM_SUSPENDED anyway in case this
-     * function will be called again for it in the meantime.
+     * its parent and the usage counters of its suppliers.  Otherwise, set
+     * power.needs_force_resume to let pm_runtime_force_resume() know that
+     * the device needs to be taken care of and to prevent this function
+     * from handling the device again in case the device is passed to it
+     * once more subsequently.
      */
-    if (pm_runtime_need_not_resume(dev)) {
+    if (pm_runtime_need_not_resume(dev))
         pm_runtime_set_suspended(dev);
-    } else {
-        __update_runtime_status(dev, RPM_SUSPENDED);
-        dev->power.needs_force_resume = 1;
-    }
+    else
+        dev->power.needs_force_resume = true;

     return 0;
@@ -2009,33 +2045,37 @@ err:
     pm_runtime_enable(dev);
     return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

+#ifdef CONFIG_PM_SLEEP
+
 /**
  * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
  *
- * Typically this function may be invoked from a system resume callback.
+ * Typically, this function may be invoked from a system resume callback.
  */
 int pm_runtime_force_resume(struct device *dev)
 {
     int (*callback)(struct device *);
     int ret = 0;

-    if (!dev->power.needs_force_resume)
+    if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+        pm_runtime_status_suspended(dev)))
         goto out;

-    /*
-     * The value of the parent's children counter is correct already, so
-     * just update the status of the device.
-     */
-    __update_runtime_status(dev, RPM_ACTIVE);
-
-    callback = RPM_GET_CALLBACK(dev, runtime_resume);
+    callback = GET_CALLBACK(dev, runtime_resume);

     dev_pm_disable_wake_irq_check(dev, false);
     ret = callback ? callback(dev) : 0;
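Taken together, pm_runtime_force_suspend() and pm_runtime_force_resume() let a driver reuse its runtime PM path for system sleep, which is how they are normally wired up in a dev_pm_ops. A sketch of the long-standing pattern, not part of this patch (the foo_* callbacks are hypothetical):

    static const struct dev_pm_ops foo_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                            pm_runtime_force_resume)
        RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

The doc-comment rewrite above reflects that, with needs_force_resume and smart_suspend tracked explicitly, the old warning against combining this with DPM_FLAG_SMART_SUSPEND no longer applies.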
@@ -2046,9 +2086,30 @@ int pm_runtime_force_resume(struct device *dev)
     }

     pm_runtime_mark_last_busy(dev);
+
 out:
-    dev->power.needs_force_resume = 0;
+    /*
+     * The smart_suspend flag can be cleared here because it is not going
+     * to be necessary until the next system-wide suspend transition that
+     * will update it again.
+     */
+    dev->power.smart_suspend = false;
+    /*
+     * Also clear needs_force_resume to make this function skip devices that
+     * have been seen by it once.
+     */
+    dev->power.needs_force_resume = false;
+
     pm_runtime_enable(dev);
     return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+    return atomic_read(&dev->power.usage_count) <= 1 &&
+        (atomic_read(&dev->power.child_count) == 0 ||
+         dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index cd6e559648b2..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
         unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
         if (hash == value) {
-            int len = snprintf(buf, size, "%s\n",
+            int len = scnprintf(buf, size, "%s\n",
                         dev_driver_string(dev));
-            if (len > size)
-                len = size;
             buf += len;
             ret += len;
             size -= len;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f7c96a3bf719..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -189,17 +189,16 @@ static void wakeup_source_remove(struct wakeup_source *ws)
     if (WARN_ON(!ws))
         return;

+    /*
+     * After shutting down the timer, wakeup_source_activate() will warn if
+     * the given wakeup source is passed to it.
+     */
+    timer_shutdown_sync(&ws->timer);
+
     raw_spin_lock_irqsave(&events_lock, flags);
     list_del_rcu(&ws->entry);
     raw_spin_unlock_irqrestore(&events_lock, flags);
     synchronize_srcu(&wakeup_srcu);
-
-    timer_delete_sync(&ws->timer);
-    /*
-     * Clear timer.function to make wakeup_source_not_registered() treat
-     * this wakeup source as not registered.
-     */
-    ws->timer.function = NULL;
 }

 /**
@@ -506,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

 /**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
  * @ws: Wakeup source to be validated.
  */
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
 {
     /*
-     * Use timer struct to check if the given source is initialized
-     * by wakeup_source_add.
+     * Use the timer struct to check if the given wakeup source has been
+     * initialized by wakeup_source_add() and it is not going away.
      */
     return ws->timer.function != pm_wakeup_timer_fn;
 }
@@ -558,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 {
     unsigned int cec;

-    if (WARN_ONCE(wakeup_source_not_registered(ws),
-              "unregistered wakeup source\n"))
+    if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
         return;

     ws->active = true;
@@ -759,7 +757,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
  */
 static void pm_wakeup_timer_fn(struct timer_list *t)
 {
-    struct wakeup_source *ws = from_timer(ws, t, timer);
+    struct wakeup_source *ws = timer_container_of(ws, t, timer);
     unsigned long flags;

     spin_lock_irqsave(&ws->lock, flags);
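The wakeup.c changes tighten the registration lifecycle that wakeup_source_activate() now checks with its "unusable" warning: a source must be registered before it is activated and must not be activated while going away. A sketch of typical driver usage, not part of this patch (all foo_* names are illustrative):

    static struct wakeup_source *foo_ws;

    static int foo_init(struct device *dev)
    {
        foo_ws = wakeup_source_register(dev, "foo_events");
        return foo_ws ? 0 : -ENOMEM;
    }

    static void foo_handle_event(void)
    {
        /* Keep the system awake while the event is being processed. */
        __pm_stay_awake(foo_ws);
        /* ... process the event ... */
        __pm_relax(foo_ws);
    }

    static void foo_exit(void)
    {
        wakeup_source_unregister(foo_ws);
    }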
