From b58e733fd774f3f4b49d9e7640d172a57e35200e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 15 Jun 2020 18:24:27 +0200
Subject: rcu: Fixup noinstr warnings

A KCSAN build revealed we have explicit annotations through atomic_*()
usage; switch to arch_atomic_*() for the respective functions.

vmlinux.o: warning: objtool: rcu_nmi_exit()+0x4d: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_dynticks_eqs_enter()+0x25: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_nmi_enter()+0x4f: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_dynticks_eqs_exit()+0x2a: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: __rcu_is_watching()+0x25: call to __kcsan_check_access() leaves .noinstr.text section

Additionally, without the NOP in instrumentation_begin(), objtool would
not detect the lack of the 'else instrumentation_begin();' branch in
rcu_nmi_enter().

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Paul E. McKenney
---
 include/linux/compiler.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux/compiler.h')

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 30827f82ad62..204e76856435 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -123,7 +123,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #ifdef CONFIG_DEBUG_ENTRY
 /* Begin/end of an instrumentation safe region */
 #define instrumentation_begin() ({					\
-	asm volatile("%c0:\n\t"						\
+	asm volatile("%c0: nop\n\t"					\
 		     ".pushsection .discard.instr_begin\n\t"		\
 		     ".long %c0b - .\n\t"				\
 		     ".popsection\n\t" : : "i" (__COUNTER__));		\
-- cgit
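For context, a minimal sketch of the noinstr pattern the warnings are about; it is not part of the patch, and the structure/function names are invented. Under CONFIG_KCSAN, a plain atomic_read() expands to an instrumented access check, which objtool flags when it appears in .noinstr.text; arch_atomic_read() performs the same access without the instrumentation hooks.

/* Illustrative sketch only -- not taken from the patch above. */
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

static atomic_t example_dynticks;	/* hypothetical counter, not RCU's real state */

static noinstr bool example_is_watching(void)
{
	/*
	 * atomic_read() would emit a __kcsan_check_access() call here and
	 * trip the objtool warning; arch_atomic_read() does the same load
	 * without calling into the instrumentation.
	 */
	return arch_atomic_read(&example_dynticks) & 0x1;
}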
From e506ea451254ab17e0bf918ca36232fec2a9b10c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 15 Oct 2019 16:29:32 -0700
Subject: compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h

In preparation for allowing architectures to define their own
implementation of the READ_ONCE() macro, move the generic
{READ,WRITE}_ONCE() definitions out of the unwieldy 'linux/compiler.h'
file and into a new 'rwonce.h' header under 'asm-generic'.

Acked-by: Peter Zijlstra (Intel)
Acked-by: Paul E. McKenney
Signed-off-by: Will Deacon
---
 include/linux/compiler.h | 93 ++----------------------------------------------
 1 file changed, 2 insertions(+), 91 deletions(-)

(limited to 'include/linux/compiler.h')

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 204e76856435..f075a3df4fe2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -230,28 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
- *
- * These two macros will also work on aggregate data types like structs or
- * unions.
- *
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- */
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
 /**
  * data_race - mark an expression as containing intentional data races
  *
@@ -272,65 +250,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	__v;								\
 })
 
-/*
- * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
- * atomicity or dependency ordering guarantees. Note that this may result
- * in tears!
- */
-#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
-
-#define __READ_ONCE_SCALAR(x)						\
-({									\
-	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-#define READ_ONCE(x)							\
-({									\
-	compiletime_assert_rwonce_type(x);				\
-	__READ_ONCE_SCALAR(x);						\
-})
-
-#define __WRITE_ONCE(x, val)						\
-do {									\
-	*(volatile typeof(x) *)&(x) = (val);				\
-} while (0)
-
-#define WRITE_ONCE(x, val)						\
-do {									\
-	compiletime_assert_rwonce_type(x);				\
-	__WRITE_ONCE(x, val);						\
-} while (0)
-
-static __no_sanitize_or_inline
-unsigned long __read_once_word_nocheck(const void *addr)
-{
-	return __READ_ONCE(*(unsigned long *)addr);
-}
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
- * word from memory atomically but without telling KASAN/KCSAN. This is
- * usually used by unwinding code when walking the stack of a running process.
- */
-#define READ_ONCE_NOCHECK(x)						\
-({									\
-	unsigned long __x;						\
-	compiletime_assert(sizeof(x) == sizeof(__x),			\
-		"Unsupported access size for READ_ONCE_NOCHECK().");	\
-	__x = __read_once_word_nocheck(&(x));				\
-	smp_read_barrier_depends();					\
-	(typeof(x))__x;							\
-})
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
-	kasan_check_read(addr, 1);
-	return *(unsigned long *)addr;
-}
-
 #endif /* __KERNEL__ */
 
 /*
@@ -395,16 +314,6 @@ static inline void *offset_to_ptr(const int *off)
 	compiletime_assert(__native_word(t),				\
 		"Need native word sized stores/loads for atomicity.")
 
-/*
- * Yes, this permits 64-bit accesses on 32-bit architectures. These will
- * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
- * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
- * (e.g. a virtual address) and a strong prevailing wind.
- */
-#define compiletime_assert_rwonce_type(t)					\
-	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
-		"Unsupported access size for {READ,WRITE}_ONCE().")
-
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
@@ -414,4 +323,6 @@ static inline void *offset_to_ptr(const int *off)
  */
 #define prevent_tail_call_optimization()	mb()
 
+#include <asm/rwonce.h>
+
 #endif /* __LINUX_COMPILER_H */
-- cgit
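As a usage-level reminder of what is being moved, a sketch of the first use case described in the deleted comment: process-level code and an interrupt handler on the same CPU communicating through a shared flag. This is not from the patch; the flag and function names are made up.

/* Illustrative sketch of READ_ONCE()/WRITE_ONCE() usage; not from the patch. */
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() now arrive via <asm/rwonce.h> */
#include <linux/types.h>

static bool example_irq_pending;	/* hypothetical flag shared with IRQ context */

/* Hypothetical interrupt handler path: publish the flag with a single store. */
static void example_handler(void)
{
	WRITE_ONCE(example_irq_pending, true);
}

/* Process-level polling loop: the compiler must refetch the flag each pass. */
static void example_wait(void)
{
	while (!READ_ONCE(example_irq_pending))
		;	/* a real loop would typically cpu_relax() here */
}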
From eb5c2d4b45e3d2d5d052ea6b8f1463976b1020d5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 21 Jul 2020 09:54:15 +0100
Subject: compiler.h: Move compiletime_assert() macros into compiler_types.h

The kernel test robot reports that moving READ_ONCE() out into its own
header breaks a W=1 build for parisc, which is relying on the definition
of compiletime_assert() being available:

  | In file included from ./arch/parisc/include/generated/asm/rwonce.h:1,
  |                  from ./include/asm-generic/barrier.h:16,
  |                  from ./arch/parisc/include/asm/barrier.h:29,
  |                  from ./arch/parisc/include/asm/atomic.h:11,
  |                  from ./include/linux/atomic.h:7,
  |                  from kernel/locking/percpu-rwsem.c:2:
  | ./arch/parisc/include/asm/atomic.h: In function 'atomic_read':
  | ./include/asm-generic/rwonce.h:36:2: error: implicit declaration of function 'compiletime_assert' [-Werror=implicit-function-declaration]
  |    36 |  compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
  |       |  ^~~~~~~~~~~~~~~~~~
  | ./include/asm-generic/rwonce.h:49:2: note: in expansion of macro 'compiletime_assert_rwonce_type'
  |    49 |  compiletime_assert_rwonce_type(x); \
  |       |  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  | ./arch/parisc/include/asm/atomic.h:73:9: note: in expansion of macro 'READ_ONCE'
  |    73 |  return READ_ONCE((v)->counter);
  |       |         ^~~~~~~~~

Move these macros into compiler_types.h, so that they are available to
READ_ONCE() and friends.

Link: http://lists.infradead.org/pipermail/linux-arm-kernel/2020-July/587094.html
Reported-by: kernel test robot
Signed-off-by: Will Deacon
---
 include/linux/compiler.h | 41 -----------------------------------------
 1 file changed, 41 deletions(-)

(limited to 'include/linux/compiler.h')

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f075a3df4fe2..59f7194fdf08 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -273,47 +273,6 @@ static inline void *offset_to_ptr(const int *off)
 
 #endif /* __ASSEMBLY__ */
 
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
-#ifdef __OPTIMIZE__
-# define __compiletime_assert(condition, msg, prefix, suffix)		\
-	do {								\
-		extern void prefix ## suffix(void) __compiletime_error(msg); \
-		if (!(condition))					\
-			prefix ## suffix();				\
-	} while (0)
-#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
-#endif
-
-#define _compiletime_assert(condition, msg, prefix, suffix) \
-	__compiletime_assert(condition, msg, prefix, suffix)
-
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg:       a message to emit if condition is false
- *
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
- */
-#define compiletime_assert(condition, msg) \
-	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
-
-#define compiletime_assert_atomic_type(t)				\
-	compiletime_assert(__native_word(t),				\
-		"Need native word sized stores/loads for atomicity.")
-
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-- cgit
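The macros being moved are the generic build-time assertion helpers that READ_ONCE() expands to. A small sketch of how compiletime_assert() is typically used (the struct and message below are invented for illustration, not part of the patch):

/* Illustrative sketch; not part of the patch. */
#include <linux/compiler.h>	/* compiletime_assert() now comes in via compiler_types.h */
#include <linux/types.h>

struct example_rec {
	u64 val;	/* hypothetical field */
};

static void example_check_layout(void)
{
	/*
	 * Evaluated at compile time: if the condition is false and the
	 * compiler supports __compiletime_error(), the build fails with
	 * the supplied message; otherwise the statement compiles away.
	 */
	compiletime_assert(sizeof(struct example_rec) == sizeof(u64),
			   "example_rec must remain a single 64-bit word");
}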
From d19e789f068b3d633cbac430764962f404198022 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 24 Jul 2020 13:50:25 +0200
Subject: compiler.h: Move instrumentation_begin()/end() to new header

Linus pointed out that compiler.h - which is a key header that gets
included in every single one of the 28,000+ kernel files during a kernel
build - was bloated in:

  655389666643: ("vmlinux.lds.h: Create section for protection against instrumentation")

Linus noted:

 > I have pulled this, but do we really want to add this to a header file
 > that is _so_ core that it gets included for basically every single
 > file built?
 >
 > I don't even see those instrumentation_begin/end() things used
 > anywhere right now.
 >
 > It seems excessive. That 53 lines is maybe not a lot, but it pushed
 > that header file to over 12kB, and while it's mostly comments, it's
 > extra IO and parsing basically for _every_ single file compiled in the
 > kernel.
 >
 > For what appears to be absolutely zero upside right now, and I really
 > don't see why this should be in such a core header file!

Move these primitives into a new header: <linux/instrumentation.h>, and
include that header in the headers that make use of it.

Unfortunately one of these headers is asm-generic/bug.h, which does get
included in a lot of places, similarly to compiler.h. So the de-bloating
effect isn't as good as we'd like it to be - but at least the interfaces
are defined separately.

No change to functionality intended.

Reported-by: Linus Torvalds
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20200604071921.GA1361070@gmail.com
Cc: Thomas Gleixner
Cc: Borislav Petkov
Cc: Peter Zijlstra
---
 include/linux/compiler.h | 53 ------------------------------------------------
 1 file changed, 53 deletions(-)

(limited to 'include/linux/compiler.h')

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 204e76856435..681894bfde99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -120,65 +120,12 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 /* Annotate a C jump table to allow objtool to follow the code flow */
 #define __annotate_jump_table __section(.rodata..c_jump_table)
 
-#ifdef CONFIG_DEBUG_ENTRY
-/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({					\
-	asm volatile("%c0: nop\n\t"					\
-		     ".pushsection .discard.instr_begin\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
-})
-
-/*
- * Because instrumentation_{begin,end}() can nest, objtool validation considers
- * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
- * When the value is greater than 0, we consider instrumentation allowed.
- *
- * There is a problem with code like:
- *
- *	noinstr void foo()
- *	{
- *		instrumentation_begin();
- *		...
- *		if (cond) {
- *			instrumentation_begin();
- *			...
- *			instrumentation_end();
- *		}
- *		bar();
- *		instrumentation_end();
- *	}
- *
- * If instrumentation_end() would be an empty label, like all the other
- * annotations, the inner _end(), which is at the end of a conditional block,
- * would land on the instruction after the block.
- *
- * If we then consider the sum of the !cond path, we'll see that the call to
- * bar() is with a 0-value, even though, we meant it to happen with a positive
- * value.
- *
- * To avoid this, have _end() be a NOP instruction, this ensures it will be
- * part of the condition block and does not escape.
- */
-#define instrumentation_end() ({					\
-	asm volatile("%c0: nop\n\t"					\
-		     ".pushsection .discard.instr_end\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
-})
-#endif /* CONFIG_DEBUG_ENTRY */
-
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
 #define __annotate_jump_table
 #endif
 
-#ifndef instrumentation_begin
-#define instrumentation_begin()		do { } while(0)
-#define instrumentation_end()		do { } while(0)
-#endif
-
 #ifndef ASM_UNREACHABLE
 # define ASM_UNREACHABLE
 #endif
-- cgit
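After this change, code that wants the begin/end markers no longer gets them from compiler.h and can pull in the new header directly. A rough, hypothetical sketch of the usage pattern (function names invented, not from the patch):

/* Illustrative sketch; not part of the patch. */
#include <linux/instrumentation.h>
#include <linux/compiler_types.h>	/* for noinstr */

static void example_trace_work(void)
{
	/* hypothetical helper that may legitimately be traced/instrumented */
}

static noinstr void example_entry(void)
{
	/* ... instrumentation-free entry work ... */

	instrumentation_begin();
	example_trace_work();		/* instrumented calls allowed in here */
	instrumentation_end();

	/* ... back to instrumentation-free code ... */
}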