-rw-r--r-- | kernel/sched/fair.c | 26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9b4bbbf0af6..8334580ed3a3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1164,6 +1164,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 	curr->exec_start = now;
 	curr->sum_exec_runtime += delta_exec;
 
+	if (entity_is_task(curr)) {
+		struct task_struct *p = task_of(curr);
+
+		trace_sched_stat_runtime(p, delta_exec);
+		account_group_exec_runtime(p, delta_exec);
+		cgroup_account_cputime(p, delta_exec);
+	}
+
 	if (schedstat_enabled()) {
 		struct sched_statistics *stats;
 
@@ -1175,26 +1183,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 	return delta_exec;
 }
 
-static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
-{
-	trace_sched_stat_runtime(p, delta_exec);
-	account_group_exec_runtime(p, delta_exec);
-	cgroup_account_cputime(p, delta_exec);
-}
-
 /*
  * Used by other classes to account runtime.
  */
 s64 update_curr_common(struct rq *rq)
 {
 	struct task_struct *donor = rq->donor;
-	s64 delta_exec;
 
-	delta_exec = update_curr_se(rq, &donor->se);
-	if (likely(delta_exec > 0))
-		update_curr_task(donor, delta_exec);
-
-	return delta_exec;
+	return update_curr_se(rq, &donor->se);
 }
 
 /*
@@ -1219,10 +1215,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	update_min_vruntime(cfs_rq);
 
 	if (entity_is_task(curr)) {
-		struct task_struct *p = task_of(curr);
-
-		update_curr_task(p, delta_exec);
-
 		/*
 		 * If the fair_server is active, we need to account for the
 		 * fair_server time whether or not the task is running on
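
Taken together, the hunks fold the per-task accounting that the removed update_curr_task() helper used to do into update_curr_se(), so update_curr_common() and update_curr() no longer call it separately. The sketch below reconstructs the post-patch shape of the two paths from the visible hunks only; it is an illustration, not the full upstream code. The delta_exec/now computation and the schedstat bookkeeping at the top of update_curr_se() fall outside the hunk context and are elided.

/*
 * Sketch of update_curr_se() after the patch, reconstructed from the
 * hunks above: the runtime delta is charged to the entity, and when the
 * entity is a task the tracepoint, group runtime, and cgroup cputime
 * accounting happen here as well.
 */
static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
{
	s64 delta_exec;

	/* delta_exec and now are computed earlier in the function,
	 * outside the visible hunk, and are elided here. */

	curr->exec_start = now;
	curr->sum_exec_runtime += delta_exec;

	if (entity_is_task(curr)) {
		struct task_struct *p = task_of(curr);

		trace_sched_stat_runtime(p, delta_exec);
		account_group_exec_runtime(p, delta_exec);
		cgroup_account_cputime(p, delta_exec);
	}

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		/* per-entity schedstat update, unchanged by this patch */
	}

	return delta_exec;
}

/*
 * Used by other classes to account runtime: with the task accounting
 * moved into update_curr_se(), a single call now does all the work.
 */
s64 update_curr_common(struct rq *rq)
{
	struct task_struct *donor = rq->donor;

	return update_curr_se(rq, &donor->se);
}

The dropped likely(delta_exec > 0) guard in update_curr_common() is presumably redundant because update_curr_se() already bails out for non-positive deltas before the lines shown in the first hunk; that early return is not visible in this diff.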
