Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c       |  52
-rw-r--r--  drivers/base/cpu.c                 |   3
-rw-r--r--  drivers/base/devres.c              |  11
-rw-r--r--  drivers/base/node.c                |   2
-rw-r--r--  drivers/base/platform-msi.c        |   1
-rw-r--r--  drivers/base/power/main.c          | 218
-rw-r--r--  drivers/base/power/runtime.c       |  46
-rw-r--r--  drivers/base/power/sysfs.c         |  15
-rw-r--r--  drivers/base/power/wakeup.c        |   2
-rw-r--r--  drivers/base/power/wakeup_stats.c  |   2
-rw-r--r--  drivers/base/regmap/Kconfig        |   4
-rw-r--r--  drivers/base/regmap/regcache.c     |  13
-rw-r--r--  drivers/base/regmap/regmap-irq.c   | 103
-rw-r--r--  drivers/base/topology.c            |  52
14 files changed, 381 insertions(+), 143 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index af0029d30dbe..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 		per_cpu(arch_freq_scale, i) = scale;
 }
 
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
-	per_cpu(cpu_scale, cpu) = capacity;
-}
-
 DEFINE_PER_CPU(unsigned long, hw_pressure);
 
 /**
@@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
 }
 EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
 
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-
-	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
 static void update_topology_flags_workfn(struct work_struct *work);
 static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
 
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
-	struct device *cpu_dev = get_cpu_device(cpu);
-
-	if (!cpu_dev)
-		return -ENOENT;
-
-	device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
-	return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
-	struct device *cpu_dev = get_cpu_device(cpu);
-
-	if (!cpu_dev)
-		return -ENOENT;
-
-	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
-	return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
-	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
-			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
 static int update_topology;
 
 int topology_update_cpu_topology(void)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 50651435577c..7779ab0ca7ce 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
 CPU_SHOW_VULN_FALLBACK(gds);
 CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
 CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
@@ -617,6 +618,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
 static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
@@ -635,6 +637,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_gather_data_sampling.attr,
 	&dev_attr_reg_file_data_sampling.attr,
 	&dev_attr_ghostwrite.attr,
+	&dev_attr_old_microcode.attr,
 	&dev_attr_indirect_target_selection.attr,
 	NULL
 };
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index d8a733ea5e1a..7c20517a52c2 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -759,6 +759,17 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
 }
 EXPORT_SYMBOL_GPL(__devm_add_action);
 
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+	struct action_devres devres = {
+		.data = data,
+		.action = action,
+	};
+
+	return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
+
 /**
  * devm_remove_action_nowarn() - removes previously added custom action
  * @dev: Device that owns the action
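The devres.c hunk above adds devm_is_action_added(), which reports whether a previously registered devres action (matched by both callback and data pointer) is still outstanding on a device. A minimal usage sketch, not part of this diff; the callback, probe function, and GPIO are hypothetical:

    static void example_assert_reset(void *data)
    {
            /* Cleanup action registered below. */
            gpiod_set_value(data, 1);
    }

    static int example_probe(struct device *dev, struct gpio_desc *reset)
    {
            int ret;

            ret = devm_add_action_or_reset(dev, example_assert_reset, reset);
            if (ret)
                    return ret;

            /* Both the action pointer and its data must match. */
            if (devm_is_action_added(dev, example_assert_reset, reset))
                    dev_dbg(dev, "reset cleanup action is pending\n");

            return 0;
    }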
diff --git a/drivers/base/node.c b/drivers/base/node.c
index cd13ef287011..618712071a1e 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -468,7 +468,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
 			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
 			     nid, 0UL,
-			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+			     nid, 0UL,
 			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			     nid, K(sreclaimable +
 				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0e60dd650b5e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
 void platform_device_msi_free_irqs_all(struct device *dev)
 {
 	msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+	msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
 }
 EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
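The platform-msi.c change makes platform_device_msi_free_irqs_all() also remove the per-device MSI irqdomain, so it now fully undoes platform_device_msi_init_and_alloc_irqs(), which created that domain. A sketch of the intended pairing, assuming the existing API; the callback body and surrounding error handling are hypothetical:

    static void example_write_msg(struct msi_desc *desc, struct msi_msg *msg)
    {
            /* Program the device doorbell from msg->address_hi/lo and msg->data. */
    }

    /* Allocation creates the device MSI domain and nvec interrupts... */
    ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, example_write_msg);
    if (ret)
            return ret;

    /* ...and freeing now also tears that domain down again. */
    platform_device_msi_free_irqs_all(dev);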
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c8b0a9e29ed8..19fd55b8ac77 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ static LIST_HEAD(dpm_noirq_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
 static int async_error;
 
 static const char *pm_verb(int event)
@@ -560,7 +561,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 	struct timer_list *timer = &wd->timer;
 
 	timer_delete_sync(timer);
-	destroy_timer_on_stack(timer);
+	timer_destroy_on_stack(timer);
 }
 #else
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -597,8 +598,11 @@ static bool is_async(struct device *dev)
 		&& !pm_trace_is_enabled();
 }
 
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
 {
+	if (dev->power.work_in_progress)
+		return true;
+
 	if (!is_async(dev))
 		return false;
 
@@ -611,14 +615,37 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 
 	put_device(dev);
 
+	return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+	guard(mutex)(&async_wip_mtx);
+
+	return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+	guard(mutex)(&async_wip_mtx);
+
+	if (!__dpm_async(dev, fn))
+		dev->power.work_in_progress = false;
+
+	return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
 	/*
-	 * async_schedule_dev_nocall() above has returned false, so func() is
-	 * not running and it is safe to update power.work_in_progress without
-	 * extra synchronization.
+	 * Start processing "async" children of the device unless it's been
+	 * started already for them.
+	 *
+	 * This could have been done for the device's "async" consumers too, but
+	 * they either need to wait for their parents or the processing has
+	 * already started for them after their parents were processed.
 	 */
-	dev->power.work_in_progress = false;
-
-	return false;
+	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
 static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +654,13 @@ static void dpm_clear_async_state(struct device *dev)
 	dev->power.work_in_progress = false;
 }
 
+static bool dpm_root_device(struct device *dev)
+{
+	return !dev->parent;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
@@ -710,6 +744,8 @@ Out:
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +769,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
 	 */
 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume_noirq);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume_noirq);
 	}
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume_noirq)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -781,6 +818,8 @@ void dpm_resume_noirq(pm_message_t state)
 	device_wakeup_disarm_wake_irqs();
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
@@ -848,6 +887,8 @@ Out:
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +916,20 @@ void dpm_resume_early(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
 	 */
 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume_early);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume_early);
 	}
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume_early)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -919,6 +961,8 @@ void dpm_resume_start(pm_message_t state)
 }
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
+static void async_resume(void *data, async_cookie_t cookie);
+
 /**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
@@ -1018,6 +1062,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
@@ -1049,19 +1095,20 @@ void dpm_resume(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
	 */
 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume);
 	}
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -1189,6 +1236,41 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
 
 /*------------------------- Suspend routines -------------------------*/
 
+static bool dpm_leaf_device(struct device *dev)
+{
+	struct device *child;
+
+	lockdep_assert_held(&dpm_list_mtx);
+
+	child = device_find_any_child(dev);
+	if (child) {
+		put_device(child);
+
+		return false;
+	}
+
+	return true;
+}
+
+static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+	guard(mutex)(&dpm_list_mtx);
+
+	/*
+	 * If the device is suspended asynchronously and the parent's callback
+	 * deletes both the device and the parent itself, the parent object may
+	 * be freed while this function is running, so avoid that by checking
+	 * if the device has been deleted already as the parent cannot be
+	 * deleted before it.
+	 */
+	if (!device_pm_initialized(dev))
+		return;
+
+	/* Start processing the device's parent if it is "async". */
+	if (dev->parent)
+		dpm_async_with_cleanup(dev->parent, func);
+}
+
 /**
  * resume_event - Return a "resume" message for given "suspend" sleep state.
  * @sleep_state: PM message representing a sleep state.
@@ -1226,6 +1308,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
 	device_links_read_unlock(idx);
 }
 
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1304,7 +1388,13 @@ Skip:
 Complete:
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend_noirq);
+
+	return 0;
 }
 
 static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,6 +1408,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
 static int dpm_noirq_suspend_devices(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1418,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend_noirq);
+	}
+
 	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.prev);
+		dev = to_device(dpm_late_early_list.prev);
 
 		list_move(&dev->power.entry, &dpm_noirq_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_noirq))
 			continue;
 
@@ -1346,8 +1446,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_late_early_list, &dpm_noirq_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1400,6 +1506,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
 	spin_unlock_irq(&parent->power.lock);
 }
 
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
@@ -1476,7 +1584,13 @@ Skip:
 Complete:
 	TRACE_SUSPEND(error);
 	complete_all(&dev->power.completion);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend_late);
+
+	return 0;
 }
 
 static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,6 +1608,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
 int dpm_suspend_late(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1620,21 @@ int dpm_suspend_late(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend_late);
+	}
+
 	while (!list_empty(&dpm_suspended_list)) {
-		struct device *dev = to_device(dpm_suspended_list.prev);
+		dev = to_device(dpm_suspended_list.prev);
 
 		list_move(&dev->power.entry, &dpm_late_early_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_late))
 			continue;
 
@@ -1524,8 +1648,14 @@ int dpm_suspend_late(pm_message_t state)
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_suspended_list, &dpm_late_early_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1614,6 +1744,8 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 	device_links_read_unlock(idx);
 }
 
+static void async_suspend(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -1743,7 +1875,13 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend);
+
+	return 0;
 }
 
 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,6 +1899,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
 int dpm_suspend(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1774,12 +1913,21 @@ int dpm_suspend(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend);
+	}
+
 	while (!list_empty(&dpm_prepared_list)) {
-		struct device *dev = to_device(dpm_prepared_list.prev);
+		dev = to_device(dpm_prepared_list.prev);
 
 		list_move(&dev->power.entry, &dpm_suspended_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend))
 			continue;
 
@@ -1793,8 +1941,14 @@ int dpm_suspend(pm_message_t state)
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_prepared_list, &dpm_suspended_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
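After this rework, resume kicks off async processing at root devices and fans out to the children of each completed device, while suspend starts at leaf devices and moves up to a device's parent once it finishes; everything else is still handled synchronously in dpm_list order. Whether a device may be processed asynchronously at all remains a per-device opt-in via the existing device_enable_async_suspend() helper, typically from probe (sketch; the probe function is hypothetical):

    static int example_probe(struct device *dev)
    {
            /* Allow the PM core to suspend/resume this device out of list order. */
            device_enable_async_suspend(dev);

            return 0;
    }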
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0e127b0329c0..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	 * If 'expires' is after the current time, we've been called
 	 * too early.
 	 */
-	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1568,6 +1568,32 @@ out:
 }
 EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
+static void pm_runtime_set_suspended_action(void *data)
+{
+	pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+	int err;
+
+	err = pm_runtime_set_active(dev);
+	if (err)
+		return err;
+
+	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+	if (err)
+		return err;
+
+	return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
 static void pm_runtime_disable_action(void *data)
 {
 	pm_runtime_dont_use_autosuspend(data);
@@ -1590,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
 
+static void pm_runtime_put_noidle_action(void *data)
+{
+	pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+	pm_runtime_get_noresume(dev);
+
+	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
 /**
  * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
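A sketch of a probe path combining the two devres helpers added above; everything apart from the devm_pm_runtime_*() calls is hypothetical:

    static int example_probe(struct device *dev)
    {
            int ret;

            /*
             * Mark the device active and enable runtime PM; the matching
             * devres actions disable runtime PM and mark the device
             * suspended again when the driver is unbound.
             */
            ret = devm_pm_runtime_set_active_enabled(dev);
            if (ret)
                    return ret;

            /*
             * Keep the device from runtime suspending for the lifetime of
             * the binding; the usage count is dropped automatically via
             * pm_runtime_put_noidle() on unbind.
             */
            return devm_pm_runtime_get_noresume(dev);
    }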
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f84018125b46..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
 
 static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
 	&dev_attr_async.attr,
 #endif
-	&dev_attr_runtime_status.attr,
-	&dev_attr_runtime_usage.attr,
-	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
 	NULL,
 };
 static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
 };
 
 static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_runtime_status.attr,
-#endif
 	&dev_attr_control.attr,
 	&dev_attr_runtime_suspended_time.attr,
 	&dev_attr_runtime_active_time.attr,
 	&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+	&dev_attr_runtime_usage.attr,
+	&dev_attr_runtime_active_kids.attr,
+	&dev_attr_runtime_enabled.attr,
+#endif
 	NULL,
 };
 static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 63bf914a4d44..7e612977be1b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -337,7 +337,7 @@ int device_wakeup_enable(struct device *dev)
 	if (!dev || !dev->power.can_wakeup)
 		return -EINVAL;
 
-	if (pm_suspend_target_state != PM_SUSPEND_ON)
+	if (pm_sleep_transition_in_progress())
 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 
 	ws = wakeup_source_register(dev, dev_name(dev));
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
 wakeup_attr(event_count);
 wakeup_attr(wakeup_count);
 wakeup_attr(expire_count);
+wakeup_attr(relax_count);
 
 static ssize_t active_time_ms_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
 	&dev_attr_event_count.attr,
 	&dev_attr_wakeup_count.attr,
 	&dev_attr_expire_count.attr,
+	&dev_attr_relax_count.attr,
 	&dev_attr_active_time_ms.attr,
 	&dev_attr_total_time_ms.attr,
 	&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
 config REGMAP
 	bool
 	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
-	select IRQ_DOMAIN if REGMAP_IRQ
-	select MDIO_BUS if REGMAP_MDIO
 	help
 	  Enable support for the Register Map (regmap) access API.
 
@@ -58,12 +56,14 @@ config REGMAP_W1
 
 config REGMAP_MDIO
 	tristate
+	select MDIO_BUS
 
 config REGMAP_MMIO
 	tristate
 
 config REGMAP_IRQ
 	bool
+	select IRQ_DOMAIN
 
 config REGMAP_RAM
 	tristate
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f7fcf2de1301..c7650fa434ad 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -34,21 +34,10 @@ static int regcache_defaults_cmp(const void *a, const void *b)
 		return 0;
 }
 
-static void regcache_defaults_swap(void *a, void *b, int size)
-{
-	struct reg_default *x = a;
-	struct reg_default *y = b;
-	struct reg_default tmp;
-
-	tmp = *x;
-	*x = *y;
-	*y = tmp;
-}
-
 void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
 {
 	sort(defaults, ndefaults, sizeof(*defaults),
-	     regcache_defaults_cmp, regcache_defaults_swap);
+	     regcache_defaults_cmp, NULL);
 }
 EXPORT_SYMBOL_GPL(regcache_sort_defaults);
Else read in the statuses, using a single bulk read if @@ -379,10 +368,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) reg = data->get_irq_reg(data, chip->main_status, i); ret = regmap_read(map, reg, &data->main_status_buf[i]); if (ret) { - dev_err(map->dev, - "Failed to read IRQ status %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status %d\n", ret); + return ret; } } @@ -398,10 +385,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) ret = read_sub_irq_data(data, b); if (ret != 0) { - dev_err(map->dev, - "Failed to read IRQ status %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status %d\n", ret); + return ret; } } @@ -418,9 +403,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_reg_buf, chip->num_regs); if (ret != 0) { - dev_err(map->dev, "Failed to read IRQ status: %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + return ret; } for (i = 0; i < data->chip->num_regs; i++) { @@ -436,7 +420,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) break; default: BUG(); - goto exit; + return -EIO; } } @@ -447,10 +431,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) ret = regmap_read(map, reg, &data->status_buf[i]); if (ret != 0) { - dev_err(map->dev, - "Failed to read IRQ status: %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + return ret; } } } @@ -459,6 +441,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) for (i = 0; i < data->chip->num_regs; i++) data->status_buf[i] = ~data->status_buf[i]; + return 0; +} + +static irqreturn_t regmap_irq_thread(int irq, void *d) +{ + struct regmap_irq_chip_data *data = d; + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + int ret, i; + bool handled = false; + u32 reg; + + if (chip->handle_pre_irq) + chip->handle_pre_irq(chip->irq_drv_data); + + if (chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) { + dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret); + goto exit; + } + } + + ret = read_irq_data(data); + if (ret < 0) + goto exit; + + if (chip->status_is_level) { + for (i = 0; i < data->chip->num_regs; i++) { + unsigned int val = data->status_buf[i]; + + data->status_buf[i] ^= data->prev_status_buf[i]; + data->prev_status_buf[i] = val; + } + } + /* * Ignore masked IRQs and ack if we need to; we ack early so * there is no race between handling and acknowledging the @@ -705,6 +723,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (!d->status_buf) goto err_alloc; + if (chip->status_is_level) { + d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf), + GFP_KERNEL); + if (!d->prev_status_buf) + goto err_alloc; + } + d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf), GFP_KERNEL); if (!d->mask_buf) @@ -881,6 +906,16 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, } } + /* Store current levels */ + if (chip->status_is_level) { + ret = read_irq_data(d); + if (ret < 0) + goto err_alloc; + + memcpy(d->prev_status_buf, d->status_buf, + array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0]))); + } + ret = regmap_irq_create_domain(fwnode, irq_base, chip, d); if (ret) goto err_alloc; @@ -908,6 +943,7 @@ err_alloc: kfree(d->mask_buf); kfree(d->main_status_buf); kfree(d->status_buf); + kfree(d->prev_status_buf); kfree(d->status_reg_buf); if (d->config_buf) { for (i = 0; i < chip->num_config_bases; i++) @@ -985,6 +1021,7 @@ void 
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index b962da263eee..8b42df05feff 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void)
 }
 
 device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+
+	if (!cpu_dev)
+		return -ENOENT;
+
+	device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+	return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+
+	if (!cpu_dev)
+		return -ENOENT;
+
+	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+	return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);