 include/linux/preempt.h | 12 ------------
 kernel/sched/core.c     | 19 ++++++-------------
 2 files changed, 6 insertions(+), 25 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bea8dd8ff5e0..448dfd0b2ea6 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -146,18 +146,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-	barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-	barrier(); \
-	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cfad7f5f74f8..6344d82a84f6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3201,9 +3201,9 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		__schedule(true);
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -3254,13 +3254,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		return;
 
 	do {
-		/*
-		 * Use raw __prempt_count() ops that don't call function.
-		 * We can't call functions before disabling preemption which
-		 * disarm preemption tracing recursions.
-		 */
-		__preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-		barrier();
+		preempt_disable_notrace();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -3270,8 +3264,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		__schedule(true);
 		exception_exit(prev_ctx);
 
-		barrier();
-		__preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+		preempt_enable_no_resched_notrace();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3294,11 +3287,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		local_irq_enable();
 		__schedule(true);
 		local_irq_disable();
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 
 	exception_exit(prev_state);
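
Taken together, the hunks drop the PREEMPT_ACTIVE-based preempt_active_enter()/preempt_active_exit() helpers and rely on the ordinary preempt-count operations (and their _notrace variants) around __schedule(true). As a minimal sketch of the pattern the diff converges on -- the sketch_* names below are illustrative stand-ins, not the kernel's verbatim macro bodies -- the replacement pair amounts to a plain increment/decrement of the preempt count, where the "no_resched" exit skips the usual reschedule check because the surrounding do { } while (need_resched()) loop re-enters __schedule() itself:

/*
 * Sketch only: roughly what preempt_disable() and
 * sched_preempt_enable_no_resched() reduce to in this context.
 */
#define sketch_preempt_disable() \
do { \
	preempt_count_inc();	/* mark this CPU non-preemptible */ \
	barrier();		/* keep the critical section from leaking up */ \
} while (0)

#define sketch_preempt_enable_no_resched() \
do { \
	barrier();		/* keep the critical section from leaking down */ \
	preempt_count_dec();	/* drop the count without a resched check */ \
} while (0)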
