path: root/kernel/workqueue.c
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2021-02-15 17:00:22 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2021-02-15 17:00:22 +0100
commit     acc3a645ef4bb301366a609730df3cd6a52154a1 (patch)
tree       9b64ca50d954c29b29fed94656b3a6c916e3aae0 /kernel/workqueue.c
parent     e1d3209f95a19df16080b069265e172738189807 (diff)
parent     8a3f1f181d39892e6ca11485a3c3ec15bb8e1a60 (diff)
Merge branches 'pm-cpuidle' and 'pm-cpufreq'
* pm-cpuidle:
  MAINTAINERS: cpuidle: exynos: include header in file pattern
  intel_idle: remove definition of DEBUG

* pm-cpufreq:
  cpufreq: Remove unused flag CPUFREQ_PM_NO_WARN
  cpufreq: Remove CPUFREQ_STICKY flag
  cpufreq: intel_pstate: Remove repeated word
  cpufreq: remove tango driver
  cpufreq: brcmstb-avs-cpufreq: Fix resource leaks in ->remove()
  cpufreq: brcmstb-avs-cpufreq: Free resources in error path
  cpufreq: qcom-hw: enable boost support
  cpufreq: tegra20: Use resource-managed API
  cpufreq: intel_pstate: Get per-CPU max freq via MSR_HWP_CAPABILITIES if available
  cpufreq: intel_pstate: Rename two functions
  cpufreq: intel_pstate: Change intel_pstate_get_hwp_max() argument
  cpufreq: intel_pstate: Always read hwp_cap_cached with READ_ONCE()
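The last pm-cpufreq entry refers to the kernel's READ_ONCE()/WRITE_ONCE() accessors
from <linux/compiler.h>. A minimal sketch of that pattern, assuming a cached
capabilities field along the lines of intel_pstate's hwp_cap_cached; the struct and
helper names below are illustrative, not the driver's actual code:

#include <linux/compiler.h>     /* READ_ONCE(), WRITE_ONCE() */
#include <linux/types.h>        /* u64 */

/* Illustrative stand-in for the driver's per-CPU data. */
struct cpudata_sketch {
        u64 hwp_cap_cached;     /* cached copy of MSR_HWP_CAPABILITIES */
};

static void cache_hwp_cap(struct cpudata_sketch *cpu, u64 cap)
{
        /* Paired with READ_ONCE() below; prevents the compiler from
         * tearing or fusing the 64-bit store. */
        WRITE_ONCE(cpu->hwp_cap_cached, cap);
}

static u64 read_hwp_cap(struct cpudata_sketch *cpu)
{
        /* Without READ_ONCE() the compiler may reload, fuse or tear
         * this read while another context updates the cached value. */
        return READ_ONCE(cpu->hwp_cap_cached);
}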
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9880b6c0e272..894bb885b40b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,18 +1849,17 @@ static void worker_attach_to_pool(struct worker *worker,
         mutex_lock(&wq_pool_attach_mutex);
 
         /*
-         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-         * online CPUs.  It'll be re-applied when any of the CPUs come up.
-         */
-        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
-        /*
          * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
          * stable across this function.  See the comments above the flag
          * definition for details.
          */
         if (pool->flags & POOL_DISASSOCIATED)
                 worker->flags |= WORKER_UNBOUND;
+        else
+                kthread_set_per_cpu(worker->task, pool->cpu);
+
+        if (worker->rescue_wq)
+                set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
         list_add_tail(&worker->node, &pool->workers);
         worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
         mutex_lock(&wq_pool_attach_mutex);
 
+        kthread_set_per_cpu(worker->task, -1);
         list_del(&worker->node);
         worker->pool = NULL;
@@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)
                 raw_spin_unlock_irq(&pool->lock);
 
-                for_each_pool_worker(worker, pool)
-                        WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+                for_each_pool_worker(worker, pool) {
+                        kthread_set_per_cpu(worker->task, -1);
+                        WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+                }
 
                 mutex_unlock(&wq_pool_attach_mutex);
@@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
          * of all workers first and then clear UNBOUND.  As we're called
          * from CPU_ONLINE, the following shouldn't fail.
          */
-        for_each_pool_worker(worker, pool)
+        for_each_pool_worker(worker, pool) {
+                kthread_set_per_cpu(worker->task, pool->cpu);
                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                   pool->attrs->cpumask) < 0);
+        }
 
         raw_spin_lock_irq(&pool->lock);
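
Taken together, the four hunks maintain one invariant: a worker carries the per-CPU
kthread mark exactly while it is bound to a per-CPU pool. A condensed sketch of that
lifecycle, using only calls visible in the hunks above (locking, list handling and
the surrounding loops are elided, so this is not a complete function):

/* Condensed from the hunks above; not compilable on its own. */

/* attach: bound workers are marked as per-CPU kthreads */
if (pool->flags & POOL_DISASSOCIATED)
        worker->flags |= WORKER_UNBOUND;
else
        kthread_set_per_cpu(worker->task, pool->cpu);
if (worker->rescue_wq)          /* only rescuers change affinity here */
        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);

/* detach: the mark is dropped before leaving the pool */
kthread_set_per_cpu(worker->task, -1);

/* CPU offline (unbind_workers): unmark, then allow any possible CPU */
kthread_set_per_cpu(worker->task, -1);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);

/* CPU online (rebind_workers): re-mark, then restore the pool's cpumask */
kthread_set_per_cpu(worker->task, pool->cpu);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);

Passing -1 to kthread_set_per_cpu() clears the per-CPU mark and a valid CPU number
sets it; on both hotplug paths the mark is updated before the affinity change.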