Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 52f72784e953..650d698244c4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10409,7 +10409,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 				      bool *sg_overloaded,
 				      bool *sg_overutilized)
 {
-	int i, nr_running, local_group;
+	int i, nr_running, local_group, sd_flags = env->sd->flags;
 
 	memset(sgs, 0, sizeof(*sgs));
 
@@ -10433,10 +10433,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		if (cpu_overutilized(i))
 			*sg_overutilized = 1;
 
-#ifdef CONFIG_NUMA_BALANCING
-		sgs->nr_numa_running += rq->nr_numa_running;
-		sgs->nr_preferred_running += rq->nr_preferred_running;
-#endif
 		/*
 		 * No need to call idle_cpu() if nr_running is not 0
 		 */
@@ -10446,10 +10442,17 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			continue;
 		}
 
+#ifdef CONFIG_NUMA_BALANCING
+		/* Only fbq_classify_group() uses this to classify NUMA groups */
+		if (sd_flags & SD_NUMA) {
+			sgs->nr_numa_running += rq->nr_numa_running;
+			sgs->nr_preferred_running += rq->nr_preferred_running;
+		}
+#endif
 		if (local_group)
 			continue;
 
-		if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
+		if (sd_flags & SD_ASYM_CPUCAPACITY) {
 			/* Check for a misfit task on the cpu */
 			if (sgs->group_misfit_task_load < rq->misfit_task_load) {
 				sgs->group_misfit_task_load = rq->misfit_task_load;
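For readers outside the kernel tree, the loop structure this patch produces can be sketched in standalone user-space C. The struct definitions, flag values, and the function name update_stats below are reduced stand-ins invented for illustration, not the kernel's real struct rq, struct sg_lb_stats, or sched-domain flags; the sketch only mirrors the two changes: the flags field is read once into a local, and the NUMA counters are accumulated only for NUMA domains, after the idle-CPU early continue (safe, since nr_running == 0 implies both counters are 0).

/* Standalone sketch; all types and flag values are hypothetical
 * stand-ins for the kernel's, kept only to show the loop shape. */
#include <stdio.h>

#define SD_NUMA             0x1	/* stand-in for the kernel's SD_NUMA */
#define SD_ASYM_CPUCAPACITY 0x2	/* stand-in, unused in this sketch   */

struct rq {			/* per-CPU runqueue, reduced to the  */
	int nr_running;		/* fields this sketch needs          */
	int nr_numa_running;
	int nr_preferred_running;
};

struct sg_lb_stats {		/* per-group accumulator */
	int sum_nr_running;
	int nr_numa_running;
	int nr_preferred_running;
	int idle_cpus;
};

static void update_stats(struct sg_lb_stats *sgs, struct rq *rqs,
			 int nr_cpus, int domain_flags)
{
	/* Read the domain flags once up front, mirroring the patch's
	 * sd_flags = env->sd->flags hoist. */
	int sd_flags = domain_flags;

	for (int i = 0; i < nr_cpus; i++) {
		struct rq *rq = &rqs[i];
		int nr_running = rq->nr_running;

		sgs->sum_nr_running += nr_running;

		/* Idle CPUs contribute nothing to the NUMA counters,
		 * so skipping them before the accumulation is safe. */
		if (!nr_running) {
			sgs->idle_cpus++;
			continue;
		}

		/* Accumulate only when balancing a NUMA domain; per the
		 * patch, only fbq_classify_group() consumes these. */
		if (sd_flags & SD_NUMA) {
			sgs->nr_numa_running += rq->nr_numa_running;
			sgs->nr_preferred_running += rq->nr_preferred_running;
		}
	}
}

int main(void)
{
	struct rq rqs[3] = { {2, 1, 1}, {0, 0, 0}, {3, 2, 1} };
	struct sg_lb_stats sgs = {0};

	update_stats(&sgs, rqs, 3, SD_NUMA);
	printf("numa=%d preferred=%d idle=%d\n",
	       sgs.nr_numa_running, sgs.nr_preferred_running, sgs.idle_cpus);
	return 0;
}

On a non-NUMA domain (domain_flags without SD_NUMA) the two per-rq loads and adds are skipped entirely, which is the cost the patch avoids on every load-balance pass at lower domain levels.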
