Diffstat (limited to 'kernel/sched/ext_idle.c')
 kernel/sched/ext_idle.c | 29 ++++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index ed1804506585..0912f94b95cd 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -14,6 +14,9 @@
/* Enable/disable built-in idle CPU selection policy */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+/* Enable/disable per-node idle cpumasks */
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
+
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
#define CL_ALIGNED_IF_ONSTACK
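The new key follows the existing scx_builtin_idle_enabled pattern: DEFINE_STATIC_KEY_FALSE() declares a runtime-patched branch, so hot paths can check whether per-node idle cpumasks are active without dereferencing ops->flags on every call. A minimal sketch of how such a key is typically consulted; the helper name below is hypothetical and not part of this patch:

/* Sketch only: scx_idle_per_node_enabled() is a hypothetical helper. */
static inline bool scx_idle_per_node_enabled(void)
{
	/*
	 * static_branch_unlikely() is a plain fall-through while the key
	 * is disabled and is patched into a jump once
	 * static_branch_enable() flips it.
	 */
	return static_branch_unlikely(&scx_builtin_idle_per_node);
}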
@@ -204,7 +207,7 @@ static bool llc_numa_mismatch(void)
* CPU belongs to a single LLC domain, and that each LLC domain is entirely
* contained within a single NUMA node.
*/
-void scx_idle_update_selcpu_topology(void)
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
{
bool enable_llc = false, enable_numa = false;
unsigned int nr_cpus;
@@ -237,13 +240,19 @@ void scx_idle_update_selcpu_topology(void)
* If all CPUs belong to the same NUMA node and the same LLC domain,
* enabling both NUMA and LLC optimizations is unnecessary, as checking
* for an idle CPU in the same domain twice is redundant.
+ *
+ * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
+ * optimization, as idle CPUs are naturally selected within a specific
+ * NUMA node by querying the corresponding per-node cpumask.
*/
-	nr_cpus = numa_weight(cpu);
-	if (nr_cpus > 0) {
-		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
-			enable_numa = true;
-		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
-			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
+		nr_cpus = numa_weight(cpu);
+		if (nr_cpus > 0) {
+			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+				enable_numa = true;
+			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
+		}
}
rcu_read_unlock();
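Skipping the NUMA pass makes sense because, with SCX_OPS_BUILTIN_IDLE_PER_NODE set, idle-CPU lookups are expected to go through the cpumask of a specific node, so a second NUMA-wide scan would be redundant. A rough sketch of what such a per-node lookup could look like; idle_cpumask_node() is an assumed accessor, not something defined in this diff:

/* Sketch only: idle_cpumask_node() is assumed, not part of this patch. */
static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node)
{
	s32 cpu;

	/* Pick any CPU that is both allowed and idle within @node. */
	cpu = cpumask_any_and_distribute(cpus_allowed, idle_cpumask_node(node));
	return cpu < nr_cpu_ids ? cpu : -EBUSY;
}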
@@ -530,6 +539,11 @@ void scx_idle_enable(struct sched_ext_ops *ops)
}
static_branch_enable(&scx_builtin_idle_enabled);
+	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
+		static_branch_enable(&scx_builtin_idle_per_node);
+	else
+		static_branch_disable(&scx_builtin_idle_per_node);
+
#ifdef CONFIG_SMP
/*
* Consider all online cpus idle. Should converge to the actual state
@@ -543,6 +557,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
void scx_idle_disable(void)
{
static_branch_disable(&scx_builtin_idle_enabled);
+	static_branch_disable(&scx_builtin_idle_per_node);
}
/********************************************************************************
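From a scheduler writer's perspective the new behaviour is opt-in: a BPF scheduler requests per-node idle cpumasks through ops.flags when it loads. A hedged sketch of the opt-in, with the usual sched_ext BPF boilerplate and all callbacks omitted; only the .flags line is the point here:

/*
 * Illustrative only: a minimal struct_ops definition opting into
 * per-node idle cpumasks.  Callback implementations are left out.
 */
SEC(".struct_ops.link")
struct sched_ext_ops pernode_example_ops = {
	.flags	= SCX_OPS_BUILTIN_IDLE_PER_NODE,
	.name	= "pernode_example",
};

Because scx_idle_enable() re-evaluates ops->flags on every load and scx_idle_disable() clears the key, a scheduler loaded later without the flag falls back to the single global idle cpumask.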