 arch/x86/include/asm/percpu.h | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01a8ac9..38f9e965ff96 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,6 +177,39 @@ do {									\
 	}								\
 } while (0)
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)					\
+({									\
+	typeof(var) paro_ret__ = val;					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("xaddb %0, "__percpu_arg(1)				\
+			    : "+q" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 2:								\
+		asm("xaddw %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 4:								\
+		asm("xaddl %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 8:								\
+		asm("xaddq %0, "__percpu_arg(1)				\
+			    : "+re" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	paro_ret__ += val;						\
+	paro_ret__;							\
+})
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -300,6 +333,14 @@ do {									\
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#endif
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -324,6 +365,8 @@ do {									\
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
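Why the trailing paro_ret__ += val: xadd exchanges its register operand with the memory operand while adding them, so after the instruction the register holds the old value of the per-cpu variable; adding val once more produces the new value that an add-return operation is expected to hand back. No lock prefix is needed, since a per-cpu variable is only modified by its owning CPU and a single instruction is atomic with respect to interrupts on that CPU. The user-space sketch below illustrates the same pattern for the 8-byte case; it is an illustration only, not kernel code (hypothetical add_return() and counter names, x86-64 with GCC/Clang inline asm assumed, and without the per-cpu segment prefix that __percpu_arg() supplies in the real macro):

#include <stdio.h>

/* Demo of the xadd-based add-return idiom used by percpu_add_return_op.
 * Hypothetical user-space code; runs on x86-64 with GCC or Clang. */
static long add_return(long *var, long val)
{
	long ret = val;

	/* xadd stores ret + *var to memory and leaves the OLD *var in ret. */
	asm("xaddq %0, %1"
	    : "+r" (ret), "+m" (*var)
	    : : "memory");

	/* Add val once more so the caller sees the NEW value, as the macro does. */
	return ret + val;
}

int main(void)
{
	long counter = 40;
	long seen = add_return(&counter, 2);

	printf("returned %ld, counter now %ld\n", seen, counter);	/* 42 and 42 */
	return 0;
}

Note also the #ifndef CONFIG_M386 guard in the second hunk: xadd first appeared on the i486, so kernels that must still run on an 80386 fall back to the generic preempt-disable implementation, while the 8-byte variants need no guard because they sit in the 64-bit-only section of the file. With these size-specific helpers in place, the this_cpu_add_return() family defined by the generic per-cpu code (and the inc/dec/sub_return forms built on it) can compile down to a single xadd on x86.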
