author	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-21 08:48:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-21 08:48:40 -0700
commit	cb90c8df91d08aebb62ef77bd1c7f41a31bdc924 (patch)
tree	9c5da28e5b7777996ba46dc9f3f2a4b8dcb61b69
parent	b3ee1e4609512dfff642a96b34d7e5dfcdc92d05 (diff)
parent	76f970ce51c80f625eb6ddbb24e9cb51b977b598 (diff)
Merge tag 'sched-urgent-2025-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fix from Ingo Molnar:

 "Revert a scheduler performance optimization that regressed other
  workloads"

* tag 'sched-urgent-2025-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "sched/core: Reduce cost of sched_move_task when config autogroup"
-rw-r--r--	kernel/sched/core.c	21

1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 67189907214d..042351c7afce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9016,7 +9016,7 @@ void sched_release_group(struct task_group *tg)
 	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-static struct task_group *sched_get_task_group(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk)
 {
 	struct task_group *tg;
 
@@ -9028,13 +9028,7 @@ static struct task_group *sched_get_task_group(struct task_struct *tsk)
 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
-
-	return tg;
-}
-
-static void sched_change_group(struct task_struct *tsk, struct task_group *group)
-{
-	tsk->sched_task_group = group;
+	tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
@@ -9055,20 +9049,11 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
 {
 	int queued, running, queue_flags =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-	struct task_group *group;
 	struct rq *rq;
 
 	CLASS(task_rq_lock, rq_guard)(tsk);
 	rq = rq_guard.rq;
 
-	/*
-	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
-	 * group changes.
-	 */
-	group = sched_get_task_group(tsk);
-	if (group == tsk->sched_task_group)
-		return;
-
 	update_rq_clock(rq);
 
 	running = task_current_donor(rq, tsk);
@@ -9079,7 +9064,7 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
 	if (running)
 		put_prev_task(rq, tsk);
 
-	sched_change_group(tsk, group);
+	sched_change_group(tsk);
 
 	if (!for_autogroup)
 		scx_cgroup_move_task(tsk);
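
The hunks above undo the early-return shortcut: sched_move_task() once again unconditionally dequeues, regroups, and requeues the task. For readers outside the kernel tree, here is a minimal standalone sketch of the reverted pattern, i.e. resolving the target group up front and skipping the move when it matches the cached one. All types and helpers below are simplified stand-ins invented for illustration, not the kernel's actual structures.

/*
 * Standalone sketch (NOT kernel code) of the optimization this merge
 * reverts: look up the task's target group first and bail out when it
 * matches the cached one, skipping the expensive dequeue/requeue.
 */
#include <stdbool.h>
#include <stdio.h>

struct task_group { int id; };
struct task { struct task_group *sched_task_group; };

/* Stand-in for the cgroup + autogroup lookup done under task_rq_lock(). */
static struct task_group *get_task_group(struct task *tsk,
					 struct task_group *cgroup_target)
{
	(void)tsk;	/* the real code consults tsk's css and autogroup */
	return cgroup_target;
}

/* The reverted pattern: skip the move when nothing would change. */
static bool move_task(struct task *tsk, struct task_group *cgroup_target)
{
	struct task_group *group = get_task_group(tsk, cgroup_target);

	if (group == tsk->sched_task_group)
		return false;	/* superfluous change: requeue skipped */

	/* dequeue, change group, requeue (elided in this sketch) */
	tsk->sched_task_group = group;
	return true;
}

int main(void)
{
	struct task_group a = { .id = 1 }, b = { .id = 2 };
	struct task t = { .sched_task_group = &a };

	printf("first move to b:  %s\n", move_task(&t, &b) ? "moved" : "skipped");
	printf("second move to b: %s\n", move_task(&t, &b) ? "moved" : "skipped");
	return 0;
}

Under the reverted patch the second call here would be skipped entirely; per the pull message, skipping those "superfluous" group changes regressed other workloads, so this merge restores the unconditional move path.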