author	Ingo Molnar <mingo@kernel.org>	2019-11-19 19:54:39 +0100
committer	Ingo Molnar <mingo@kernel.org>	2019-11-19 19:56:28 +0100
commit	8e1d58ae0c8d4af9ab0141f7e8a9ca95720df01c (patch)
tree	7b09138ee1aca2825f86f5534bfd2caad83190ec /include/linux/compiler.h
parent	af42d3466bdc8f39806b26f593604fdc54140bcb (diff)
parent	40d04110f87940b6a03bf0aa19cd29e84f465f20 (diff)
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/kcsan
Pull the KCSAN subsystem from Paul E. McKenney:

 "This pull request contains base kernel concurrency sanitizer (KCSAN)
  enablement for x86, courtesy of Marco Elver. KCSAN is a sampling
  watchpoint-based data-race detector, and is documented in
  Documentation/dev-tools/kcsan.rst. KCSAN was announced in September,
  and much feedback has since been incorporated:

    http://lkml.kernel.org/r/CANpmjNPJ_bHjfLZCAPV23AXFfiPiyXXqqu72n6TgWzb2Gnu1eA@mail.gmail.com

  The data races located thus far have resulted in a number of fixes:

    https://github.com/google/ktsan/wiki/KCSAN#upstream-fixes-of-data-races-found-by-kcsan

  Additional information may be found here:

    https://lore.kernel.org/lkml/20191114180303.66955-1-elver@google.com/"

Signed-off-by: Ingo Molnar <mingo@kernel.org>
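As background (not part of this commit page): KCSAN reports exactly the pattern sketched below, namely concurrent accesses to the same location where at least one access is a plain, unmarked write. The sketch is a deliberately minimal userspace C program using POSIX threads so that it compiles on its own; in kernel code the analogous unmarked accesses are what KCSAN flags, and the usual remedies are READ_ONCE()/WRITE_ONCE() or, for intentional benign races, the data_race() macro added further down in this diff.

/*
 * Illustrative sketch only -- not kernel code and not part of this commit.
 * Build with: cc -pthread race.c
 */
#include <pthread.h>
#include <stdio.h>

static int shared_flag;			/* shared, accessed without any marking */

static void *writer(void *arg)
{
	(void)arg;
	shared_flag = 1;		/* plain write: races with reader() */
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	int seen = shared_flag;		/* plain read: races with writer() */
	printf("seen=%d\n", seen);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}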
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--	include/linux/compiler.h	57
1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5e88e7e33abe..7d3e77781578 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -178,6 +178,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#include <uapi/linux/types.h>
+#include <linux/kcsan-checks.h>
#define __READ_ONCE_SIZE \
({ \
@@ -193,12 +194,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
} \
})
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
#ifdef CONFIG_KASAN
/*
 * We can't declare function 'inline' because __no_sanitize_address conflicts
@@ -207,18 +202,44 @@ void __read_once_size(const volatile void *p, void *res, int size)
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif
-static __no_kasan_or_inline
+#ifdef __SANITIZE_THREAD__
+/*
+ * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
+ * compilation units where instrumentation is disabled.
+ */
+# define __no_kcsan_or_inline __no_sanitize_thread notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+static __no_kcsan_or_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+ kcsan_check_atomic_read(p, size);
+ __READ_ONCE_SIZE;
+}
+
+static __no_sanitize_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+static __no_kcsan_or_inline
+void __write_once_size(volatile void *p, void *res, int size)
{
+ kcsan_check_atomic_write(p, size);
+
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
@@ -289,6 +310,26 @@ unsigned long read_word_at_a_time(const void *addr)
__u.__val; \
})
+#include <linux/kcsan.h>
+
+/*
+ * data_race: macro to document that accesses in an expression may conflict with
+ * other concurrent accesses resulting in data races, but the resulting
+ * behaviour is deemed safe regardless.
+ *
+ * This macro *does not* affect normal code generation, but is a hint to tooling
+ * that data races here should be ignored.
+ */
+#define data_race(expr) \
+ ({ \
+ typeof(({ expr; })) __val; \
+ kcsan_nestable_atomic_begin(); \
+ __val = ({ expr; }); \
+ kcsan_nestable_atomic_end(); \
+ __val; \
+ })
+#else
+
#endif /* __KERNEL__ */
/*
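To make the mechanics of the new data_race() macro concrete, the following self-contained userspace mock (illustrative only; kcsan_nestable_atomic_begin()/kcsan_nestable_atomic_end() are replaced by empty stubs standing in for the real helpers from <linux/kcsan.h>) shows that the statement-expression/typeof() construction evaluates the expression once and yields its value, so the annotation changes nothing about normal code generation -- it only brackets the access for the tooling.

/*
 * Userspace mock of the data_race() pattern above -- illustrative only.
 * typeof() and statement expressions are GNU C extensions, as in the kernel.
 * Build with: cc data_race_demo.c
 */
#include <stdio.h>

static void kcsan_nestable_atomic_begin(void) { }	/* stub for the kernel helper */
static void kcsan_nestable_atomic_end(void)   { }	/* stub for the kernel helper */

#define data_race(expr) \
	({ \
		typeof(({ expr; })) __val; \
		kcsan_nestable_atomic_begin(); \
		__val = ({ expr; }); \
		kcsan_nestable_atomic_end(); \
		__val; \
	})

static int racy_counter = 42;	/* imagine this is updated concurrently elsewhere */

int main(void)
{
	/* Diagnostic read where a momentarily stale value is acceptable. */
	int snapshot = data_race(racy_counter);

	printf("snapshot=%d\n", snapshot);
	return 0;
}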