diff options
| author | Michal Koutný <mkoutny@suse.com> | 2025-03-10 18:04:37 +0100 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2025-04-08 20:55:53 +0200 |
| commit | 61d3164fec2ed283645dc17fcc51959e8f361e18 (patch) | |
| tree | ff6eda19b63035c31b93cf9f7a053213135bf023 | |
| parent | e34e0131fea1b0f63c2105a1958c94af2ee90f4d (diff) | |
sched: Skip non-root task_groups with disabled RT_GROUP_SCHED
First, we want to prevent placement of RT tasks on non-root rt_rqs. We
achieve this in the task migration code, which would otherwise fall back to
root_task_group's rt_rq.
Second, when RT_GROUP is disabled, we want to work with only
root_task_group's rt_rq while iterating all "real" rt_rqs. To achieve this
we keep root_task_group as the first entry on the task_groups list and
break out of the iteration quickly.
Signed-off-by: Michal Koutný <mkoutny@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-6-mkoutny@suse.com
| -rw-r--r-- | kernel/sched/core.c | 2 | ||||
| -rw-r--r-- | kernel/sched/rt.c | 9 | ||||
| -rw-r--r-- | kernel/sched/sched.h | 7 |
3 files changed, 14 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 58d093a8c1af..32fb4c1100cb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9020,7 +9020,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent) unsigned long flags; spin_lock_irqsave(&task_group_lock, flags); - list_add_rcu(&tg->list, &task_groups); + list_add_tail_rcu(&tg->list, &task_groups); /* Root should already exist: */ WARN_ON(!parent); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 1af3996ec0fb..efa22bad31e1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -495,6 +495,9 @@ typedef struct task_group *rt_rq_iter_t; static inline struct task_group *next_task_group(struct task_group *tg) { + if (!rt_group_sched_enabled()) + return NULL; + do { tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); @@ -507,9 +510,9 @@ static inline struct task_group *next_task_group(struct task_group *tg) } #define for_each_rt_rq(rt_rq, iter, rq) \ - for (iter = container_of(&task_groups, typeof(*iter), list); \ - (iter = next_task_group(iter)) && \ - (rt_rq = iter->rt_rq[cpu_of(rq)]);) + for (iter = &root_task_group; \ + iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \ + iter = next_task_group(iter)) #define for_each_sched_rt_entity(rt_se) \ for (; rt_se; rt_se = rt_se->parent) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d1e591f91cf8..898aab7417bd 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2165,6 +2165,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) #endif #ifdef CONFIG_RT_GROUP_SCHED + /* + * p->rt.rt_rq is NULL initially and it is easier to assign + * root_task_group's rt_rq than switching in rt_rq_of_se() + * Clobbers tg(!) + */ + if (!rt_group_sched_enabled()) + tg = &root_task_group; p->rt.rt_rq = tg->rt_rq[cpu]; p->rt.parent = tg->rt_se[cpu]; #endif |
