Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/energy_model.c  72
-rw-r--r--  kernel/power/hibernate.c     23
-rw-r--r--  kernel/power/main.c           8
-rw-r--r--  kernel/power/power.h          4
-rw-r--r--  kernel/power/process.c        8
-rw-r--r--  kernel/power/snapshot.c      42
-rw-r--r--  kernel/power/wakelock.c       3
7 files changed, 98 insertions, 62 deletions
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index d9b7e2b38c7a..ea7995a25780 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -233,6 +233,10 @@ static int em_compute_costs(struct device *dev, struct em_perf_state *table,
unsigned long prev_cost = ULONG_MAX;
int i, ret;
+ /* This is needed only for CPUs and EAS; skip other devices */
+ if (!_is_cpu_device(dev))
+ return 0;
+
/* Compute the cost of each performance state. */
for (i = nr_states - 1; i >= 0; i--) {
unsigned long power_res, cost;
@@ -698,10 +702,12 @@ static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd,
{
int ret;
- ret = em_compute_costs(dev, em_table->state, NULL, pd->nr_perf_states,
- pd->flags);
- if (ret)
- goto free_em_table;
+ if (!em_is_artificial(pd)) {
+ ret = em_compute_costs(dev, em_table->state, NULL,
+ pd->nr_perf_states, pd->flags);
+ if (ret)
+ goto free_em_table;
+ }
ret = em_dev_update_perf_domain(dev, em_table);
if (ret)
@@ -721,10 +727,24 @@ free_em_table:
* Adjustment of CPU performance values after boot, when all CPUs' capacities
* are correctly calculated.
*/
-static void em_adjust_new_capacity(struct device *dev,
+static void em_adjust_new_capacity(unsigned int cpu, struct device *dev,
struct em_perf_domain *pd)
{
+ unsigned long cpu_capacity = arch_scale_cpu_capacity(cpu);
struct em_perf_table *em_table;
+ struct em_perf_state *table;
+ unsigned long em_max_perf;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
+ em_max_perf = table[pd->nr_perf_states - 1].performance;
+ rcu_read_unlock();
+
+ if (em_max_perf == cpu_capacity)
+ return;
+
+ pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n", cpu,
+ cpu_capacity, em_max_perf);
em_table = em_table_dup(pd);
if (!em_table) {
@@ -737,12 +757,27 @@ static void em_adjust_new_capacity(struct device *dev,
em_recalc_and_update(dev, pd, em_table);
}
+/**
+ * em_adjust_cpu_capacity() - Adjust the EM for a CPU after a capacity update.
+ * @cpu: Target CPU.
+ *
+ * Adjust the existing EM for @cpu after a capacity update under the assumption
+ * that the capacity has been updated in the same way for all of the CPUs in
+ * the same perf domain.
+ */
+void em_adjust_cpu_capacity(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+ struct em_perf_domain *pd;
+
+ pd = em_pd_get(dev);
+ if (pd)
+ em_adjust_new_capacity(cpu, dev, pd);
+}
+
static void em_check_capacity_update(void)
{
cpumask_var_t cpu_done_mask;
- struct em_perf_state *table;
- struct em_perf_domain *pd;
- unsigned long cpu_capacity;
int cpu;
if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
@@ -753,7 +788,7 @@ static void em_check_capacity_update(void)
/* Check if the CPUs' capacity has changed, then update the EM */
for_each_possible_cpu(cpu) {
struct cpufreq_policy *policy;
- unsigned long em_max_perf;
+ struct em_perf_domain *pd;
struct device *dev;
if (cpumask_test_cpu(cpu, cpu_done_mask))
@@ -776,24 +811,7 @@ static void em_check_capacity_update(void)
cpumask_or(cpu_done_mask, cpu_done_mask,
em_span_cpus(pd));
- cpu_capacity = arch_scale_cpu_capacity(cpu);
-
- rcu_read_lock();
- table = em_perf_state_from_pd(pd);
- em_max_perf = table[pd->nr_perf_states - 1].performance;
- rcu_read_unlock();
-
- /*
- * Check if the CPU capacity has been adjusted during boot
- * and trigger the update for new performance values.
- */
- if (em_max_perf == cpu_capacity)
- continue;
-
- pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n",
- cpu, cpu_capacity, em_max_perf);
-
- em_adjust_new_capacity(dev, pd);
+ em_adjust_new_capacity(cpu, dev, pd);
}
free_cpumask_var(cpu_done_mask);
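
The net effect of the energy_model.c changes: the capacity comparison moves out
of em_check_capacity_update() and into em_adjust_new_capacity(), so the new
em_adjust_cpu_capacity() helper can refresh a single CPU's EM on demand and
return early when nothing changed. A minimal caller sketch, assuming the
declaration is exported alongside the other EM helpers in linux/energy_model.h;
rescale_and_update_em() is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/energy_model.h>

    /* Hypothetical caller: refresh the EM for a set of CPUs after the
     * arch code has updated arch_scale_cpu_capacity() for them. */
    static void rescale_and_update_em(const struct cpumask *cpus)
    {
            unsigned int cpu;

            for_each_cpu(cpu, cpus)
                    /* No-op when the EM's top performance state already
                     * matches arch_scale_cpu_capacity(cpu). */
                    em_adjust_cpu_capacity(cpu);
    }
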
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 338c9917d4ee..519fb09de5e0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -90,6 +90,11 @@ void hibernate_release(void)
atomic_inc(&hibernate_atomic);
}
+bool hibernation_in_progress(void)
+{
+ return !atomic_read(&hibernate_atomic);
+}
+
bool hibernation_available(void)
{
return nohibernate == 0 &&
@@ -133,10 +138,15 @@ bool system_entering_hibernation(void)
EXPORT_SYMBOL(system_entering_hibernation);
#ifdef CONFIG_PM_DEBUG
+static unsigned int pm_test_delay = 5;
+module_param(pm_test_delay, uint, 0644);
+MODULE_PARM_DESC(pm_test_delay,
+ "Number of seconds to wait before resuming from hibernation test");
static void hibernation_debug_sleep(void)
{
- pr_info("debug: Waiting for 5 seconds.\n");
- mdelay(5000);
+ pr_info("hibernation debug: Waiting for %d second(s).\n",
+ pm_test_delay);
+ mdelay(pm_test_delay * 1000);
}
static int hibernation_test(int level)
@@ -757,7 +767,7 @@ int hibernate(void)
* Query for the compression algorithm support if compression is enabled.
*/
if (!nocompress) {
- strscpy(hib_comp_algo, hibernate_compressor, sizeof(hib_comp_algo));
+ strscpy(hib_comp_algo, hibernate_compressor);
if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
pr_err("%s compression is not available\n", hib_comp_algo);
return -EOPNOTSUPP;
@@ -1013,9 +1023,9 @@ static int software_resume(void)
*/
if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
- strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4, sizeof(hib_comp_algo));
+ strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4);
else
- strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO, sizeof(hib_comp_algo));
+ strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO);
if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) {
pr_err("%s compression is not available\n", hib_comp_algo);
error = -EOPNOTSUPP;
@@ -1470,8 +1480,7 @@ static int hibernate_compressor_param_set(const char *compressor,
if (index >= 0) {
ret = param_set_copystring(comp_alg_enabled[index], kp);
if (!ret)
- strscpy(hib_comp_algo, comp_alg_enabled[index],
- sizeof(hib_comp_algo));
+ strscpy(hib_comp_algo, comp_alg_enabled[index]);
} else {
ret = index;
}
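
The strscpy() conversions use the two-argument form, which infers the
destination size with sizeof() when the destination is a fixed-size array, as
hib_comp_algo is; the explicit sizeof(hib_comp_algo) argument was therefore
redundant. A self-contained sketch of the pattern (algo_buf and
copy_algo_name are illustrative stand-ins):

    #include <linux/string.h>

    static char algo_buf[32];       /* stand-in for hib_comp_algo */

    static ssize_t copy_algo_name(const char *name)
    {
            /* Expands to strscpy(algo_buf, name, sizeof(algo_buf));
             * returns the copied length or -E2BIG on truncation. */
            return strscpy(algo_buf, name);
    }

Separately, pm_test_delay makes the formerly hard-coded 5-second test wait
tunable; since hibernate.c is built in, it would be set as
hibernate.pm_test_delay=N on the kernel command line (parameter prefix assumed
from the file's KBUILD_MODNAME).
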
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 0b0e76324c43..3d484630505a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -557,6 +557,10 @@ static int __init pm_debugfs_init(void)
late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+bool pm_sleep_transition_in_progress(void)
+{
+ return pm_suspend_in_progress() || hibernation_in_progress();
+}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
@@ -594,7 +598,7 @@ power_attr(pm_print_times);
static inline void pm_print_times_init(void)
{
- pm_print_times_enabled = !!initcall_debug;
+ pm_print_times_enabled = initcall_debug;
}
static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
@@ -613,7 +617,7 @@ bool pm_debug_messages_on __read_mostly;
bool pm_debug_messages_should_print(void)
{
- return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON;
+ return pm_debug_messages_on && pm_sleep_transition_in_progress();
}
EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);
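
Since __pm_pr_dbg() is gated by pm_debug_messages_should_print(), this widens
pm_pr_dbg() output from suspend transitions only to any sleep transition,
hibernation included. A hypothetical driver callback illustrating the effect
(mydrv_suspend is not a kernel symbol):

    #include <linux/device.h>
    #include <linux/suspend.h>

    static int mydrv_suspend(struct device *dev)
    {
            /* With pm_debug_messages_on set, this now prints during
             * hibernation as well as suspend-to-idle/RAM. */
            pm_pr_dbg("quiescing %s\n", dev_name(dev));
            return 0;
    }
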
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 2eb81662b8fa..cb1d71562002 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -75,10 +75,14 @@ extern void enable_restore_image_protection(void);
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+extern bool hibernation_in_progress(void);
+
#else /* !CONFIG_HIBERNATION */
static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
+
+static inline bool hibernation_in_progress(void) { return false; }
#endif /* !CONFIG_HIBERNATION */
#define power_attr(_name) \
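
Pairing the extern declaration with a static inline stub that returns false
keeps callers free of #ifdef CONFIG_HIBERNATION guards; the compiler simply
folds the hibernation term away when it is compiled out. A sketch of such a
caller (any_sleep_transition() is illustrative; pm_sleep_transition_in_progress()
in main.c is the real one):

    static inline bool any_sleep_transition(void)
    {
            /* Reduces to pm_suspend_in_progress() alone when
             * CONFIG_HIBERNATION is off. */
            return pm_suspend_in_progress() || hibernation_in_progress();
    }
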
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 66ac067d9ae6..dc0dfc349f22 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -189,7 +189,7 @@ void thaw_processes(void)
oom_killer_enable();
- pr_info("Restarting tasks ... ");
+ pr_info("Restarting tasks: Starting\n");
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
@@ -208,7 +208,7 @@ void thaw_processes(void)
usermodehelper_enable();
schedule();
- pr_cont("done.\n");
+ pr_info("Restarting tasks: Done\n");
trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
@@ -217,7 +217,7 @@ void thaw_kernel_threads(void)
struct task_struct *g, *p;
pm_nosig_freezing = false;
- pr_info("Restarting kernel threads ... ");
+ pr_info("Restarting kernel threads ...\n");
thaw_workqueues();
@@ -229,5 +229,5 @@ void thaw_kernel_threads(void)
read_unlock(&tasklist_lock);
schedule();
- pr_cont("done.\n");
+ pr_info("Done restarting kernel threads.\n");
}
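
pr_cont() continuations are not atomic: a message logged from another CPU
between the pr_info() and the pr_cont() splits the intended single line.
Emitting self-contained start/done lines avoids that. A sketch of the
resulting pattern (thaw_log_pattern is illustrative):

    #include <linux/printk.h>

    static void thaw_log_pattern(void)
    {
            pr_info("Restarting tasks: Starting\n");
            /* Messages from other contexts may land here without
             * corrupting either line. */
            pr_info("Restarting tasks: Done\n");
    }
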
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4e6e24e8b854..2af36cfe35cd 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1094,16 +1094,15 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
((unsigned long long) region->end_pfn << PAGE_SHIFT)
- 1);
- for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
- if (pfn_valid(pfn)) {
- /*
- * It is safe to ignore the result of
- * mem_bm_set_bit_check() here, since we won't
- * touch the PFNs for which the error is
- * returned anyway.
- */
- mem_bm_set_bit_check(bm, pfn);
- }
+ for_each_valid_pfn(pfn, region->start_pfn, region->end_pfn) {
+ /*
+ * It is safe to ignore the result of
+ * mem_bm_set_bit_check() here, since we won't
+ * touch the PFNs for which the error is
+ * returned anyway.
+ */
+ mem_bm_set_bit_check(bm, pfn);
+ }
}
}
@@ -1255,21 +1254,20 @@ static void mark_free_pages(struct zone *zone)
spin_lock_irqsave(&zone->lock, flags);
max_zone_pfn = zone_end_pfn(zone);
- for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
+ for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
+ page = pfn_to_page(pfn);
- if (!--page_count) {
- touch_nmi_watchdog();
- page_count = WD_PAGE_COUNT;
- }
+ if (!--page_count) {
+ touch_nmi_watchdog();
+ page_count = WD_PAGE_COUNT;
+ }
- if (page_zone(page) != zone)
- continue;
+ if (page_zone(page) != zone)
+ continue;
- if (!swsusp_page_is_forbidden(page))
- swsusp_unset_page_free(page);
- }
+ if (!swsusp_page_is_forbidden(page))
+ swsusp_unset_page_free(page);
+ }
for_each_migratetype_order(order, t) {
list_for_each_entry(page,
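
Both snapshot.c hunks replace an open-coded pfn_valid() test with
for_each_valid_pfn(), which visits the same pfns but can skip whole invalid
ranges on sparse-memory configurations instead of testing each pfn
individually. The open-coded equivalent, for comparison (walk_valid_pfns is
illustrative):

    #include <linux/mmzone.h>

    static void walk_valid_pfns(unsigned long start_pfn,
                                unsigned long end_pfn)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    if (!pfn_valid(pfn))
                            continue;
                    /* Body runs for valid pfns only, matching
                     * for_each_valid_pfn(pfn, start_pfn, end_pfn). */
            }
    }
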
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 52571dcad768..4e941999a53b 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -49,6 +49,9 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
len += sysfs_emit_at(buf, len, "%s ", wl->name);
}
+ if (len > 0)
+ --len;
+
len += sysfs_emit_at(buf, len, "\n");
mutex_unlock(&wakelocks_lock);
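
The wakelock.c change trims the trailing space emitted after the last name, so
the buffer ends "name1 name2\n" instead of "name1 name2 \n"; the len > 0 guard
leaves the empty-list output (a bare newline) unchanged. A self-contained
sketch of the same logic (show_names, names, and n are illustrative):

    #include <linux/sysfs.h>

    static ssize_t show_names(char *buf, const char * const *names, int n)
    {
            ssize_t len = 0;
            int i;

            for (i = 0; i < n; i++)
                    len += sysfs_emit_at(buf, len, "%s ", names[i]);

            if (len > 0)
                    --len;  /* the "\n" below overwrites the space */

            len += sysfs_emit_at(buf, len, "\n");
            return len;
    }
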