author      Linus Torvalds <torvalds@linux-foundation.org>    2022-10-03 17:24:22 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2022-10-03 17:24:22 -0700
commit      d0989d01c66fed6a741820a96b8cca6688f183ff (patch)
tree        8454b0329481fec3c2ff8fa6663fd544d8bcd919 /lib/overflow_kunit.c
parent      865dad2022c52ac6c5c9a87c5cec78a69f633fb6 (diff)
parent      2120635108b35ecad9c59c8b44f6cbdf4f98214e (diff)
Merge tag 'hardening-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:
"Most of the collected changes here are fixes across the tree for
various hardening features (details noted below).
The most notable new feature here is the addition of the memcpy()
overflow warning (under CONFIG_FORTIFY_SOURCE), which is the next step
on the path to killing the common class of "trivially detectable"
buffer overflow conditions (i.e. on arrays with sizes known at compile
time) that have resulted in many exploitable vulnerabilities over the
years (e.g. BleedingTooth).
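To make that class concrete: the pattern is a copy whose destination size and
length are both compile-time constants, so the fortified memcpy() can reject
it at build time. A minimal editor's sketch (not code from the tree; the
struct and names are hypothetical):

    #include <linux/string.h>
    #include <linux/types.h>

    struct pkt {
            u8 hdr[8];
    };

    static void fill_hdr(struct pkt *p, const u8 *src)
    {
            /*
             * 16 > sizeof(p->hdr), and both values are known at compile
             * time, so CONFIG_FORTIFY_SOURCE can flag this write before
             * the kernel ever runs it.
             */
            memcpy(p->hdr, src, 16);
    }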
This feature is expected to still have some undiscovered false
positives. It's been in -next for a full development cycle and all the
reported false positives have been fixed in their respective trees.
All the known-bad code patterns we could find with Coccinelle are also
either fixed in their respective trees or in flight.
The commit message in commit 54d9469bc515 ("fortify: Add run-time WARN
for cross-field memcpy()") for the feature has extensive details, but
I'll repeat here that this is a warning _only_, and is not intended to
actually block overflows (yet). The many patches fixing array sizes
and struct members have been landing for several years now, and we're
finally able to turn this on to find any remaining stragglers.
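Concretely, the run-time check fires when the memcpy() destination is a single
struct member but the length reaches into the members that follow it. A
minimal editor's sketch (illustrative names, not code from the series):

    #include <linux/string.h>
    #include <linux/types.h>

    struct frame {
            u8 len;
            u8 data[64];
            u8 crc;
    };

    static void rx_copy(struct frame *f, const u8 *src, size_t n)
    {
            /*
             * If n exceeds sizeof(f->data) at run time, the copy would
             * cross from "data" into "crc"; the fortified memcpy() now
             * WARNs about the cross-field write, but (as noted above)
             * the copy itself still proceeds.
             */
            memcpy(f->data, src, n);
    }

Where a copy is genuinely meant to span several adjacent members, the usual
remedy is to wrap those members in struct_group() so the destination the
compiler sees covers the whole intended region.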
Summary:
Various fixes across several hardening areas:
- loadpin: Fix verity target enforcement (Matthias Kaehlcke).
- zero-call-used-regs: Add missing clobbers in paravirt (Bill
Wendling).
- CFI: clean up sparc function pointer type mismatches (Bart Van
Assche).
- Clang: Adjust compiler flag detection for various Clang changes
(Sami Tolvanen, Kees Cook).
- fortify: Fix warnings in arch-specific code in sh, ARM, and xen.
Improvements to existing features:
- testing: improve overflow KUnit test, introduce fortify KUnit test,
add more coverage to LKDTM tests (Bart Van Assche, Kees Cook).
- overflow: Relax overflow type checking for wider utility.
New features:
- string: Introduce strtomem() and strtomem_pad() to fill a gap in
strncpy() replacement needs (a usage sketch follows this summary).
- um: Enable FORTIFY_SOURCE support.
- fortify: Enable run-time struct member memcpy() overflow warning"
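As a sketch of the gap strtomem() and strtomem_pad() close (editor's
illustration; the struct and field names are hypothetical, and the helpers are
assumed to keep the (dest, src) and (dest, src, pad) forms described in the
series): they copy a NUL-terminated string into a fixed-size buffer that is
not required to stay NUL-terminated, which is the job strncpy() was frequently
misused for.

    #include <linux/string.h>

    struct wifi_cfg {
            char ssid[32];          /* fixed-size field, not a C string */
    };

    static void set_ssid(struct wifi_cfg *cfg, const char *name)
    {
            /* Old pattern, with strncpy()'s awkward truncation/padding rules: */
            strncpy(cfg->ssid, name, sizeof(cfg->ssid));

            /* New helpers: the destination size is taken from the array itself. */
            strtomem(cfg->ssid, name);              /* copy only */
            strtomem_pad(cfg->ssid, name, 0);       /* copy, then zero-fill the rest */
    }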
* tag 'hardening-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (27 commits)
Makefile.extrawarn: Move -Wcast-function-type-strict to W=1
hardening: Remove Clang's enable flag for -ftrivial-auto-var-init=zero
sparc: Unbreak the build
x86/paravirt: add extra clobbers with ZERO_CALL_USED_REGS enabled
x86/paravirt: clean up typos and grammaros
fortify: Convert to struct vs member helpers
fortify: Explicitly check bounds are compile-time constants
x86/entry: Work around Clang __bdos() bug
ARM: decompressor: Include .data.rel.ro.local
fortify: Adjust KUnit test for modular build
sh: machvec: Use char[] for section boundaries
kunit/memcpy: Avoid pathological compile-time string size
lib: Improve the is_signed_type() kunit test
LoadPin: Require file with verity root digests to have a header
dm: verity-loadpin: Only trust verity targets with enforcement
LoadPin: Fix Kconfig doc about format of file with verity digests
um: Enable FORTIFY_SOURCE
lkdtm: Update tests for memcpy() run-time warnings
fortify: Add run-time WARN for cross-field memcpy()
fortify: Use SIZE_MAX instead of (size_t)-1
...
Diffstat (limited to 'lib/overflow_kunit.c')
-rw-r--r--    lib/overflow_kunit.c    177
1 file changed, 122 insertions, 55 deletions
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 7e3e43679b73..f385ca652b74 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -16,12 +16,15 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
-#define DEFINE_TEST_ARRAY(t) \
-        static const struct test_ ## t { \
-                t a, b; \
-                t sum, diff, prod; \
-                bool s_of, d_of, p_of; \
-        } t ## _tests[]
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+        static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+                t1 a; \
+                t2 b; \
+                t sum, diff, prod; \
+                bool s_of, d_of, p_of; \
+        } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
 
 DEFINE_TEST_ARRAY(u8) = {
         {0, 0, 0, 0, 0, false, false, false},
@@ -222,21 +225,27 @@ DEFINE_TEST_ARRAY(s64) = {
 };
 #endif
 
-#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
-        t _r; \
-        bool _of; \
-        \
-        _of = check_ ## op ## _overflow(a, b, &_r); \
-        KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+        int _a_orig = a, _a_bump = a + 1; \
+        int _b_orig = b, _b_bump = b + 1; \
+        bool _of; \
+        t _r; \
+        \
+        _of = check_ ## op ## _overflow(a, b, &_r); \
+        KUNIT_EXPECT_EQ_MSG(test, _of, of, \
                 "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
-                a, b, of ? "" : " not", #t); \
-        KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+                a, b, of ? "" : " not", #t); \
+        KUNIT_EXPECT_EQ_MSG(test, _r, r, \
                 "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
-                a, b, r, _r, #t); \
+                a, b, r, _r, #t); \
+        /* Check for internal macro side-effects. */ \
+        _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+        KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
+        KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
 } while (0)
 
-#define DEFINE_TEST_FUNC(t, fmt) \
-static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
 { \
         check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
         check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
@@ -245,15 +254,18 @@ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
         check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
 } \
 \
-static void t ## _overflow_test(struct kunit *test) { \
+static void n ## _overflow_test(struct kunit *test) { \
         unsigned i; \
         \
-        for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
-                do_test_ ## t(test, &t ## _tests[i]); \
+        for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+                do_test_ ## n(test, &n ## _tests[i]); \
         kunit_info(test, "%zu %s arithmetic tests finished\n", \
-                ARRAY_SIZE(t ## _tests), #t); \
+                ARRAY_SIZE(n ## _tests), #n); \
 }
 
+#define DEFINE_TEST_FUNC(t, fmt) \
+        DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
 DEFINE_TEST_FUNC(u8, "%d");
 DEFINE_TEST_FUNC(s8, "%d");
 DEFINE_TEST_FUNC(u16, "%d");
@@ -265,9 +277,32 @@ DEFINE_TEST_FUNC(u64, "%llu");
 DEFINE_TEST_FUNC(s64, "%lld");
 #endif
 
-static void overflow_shift_test(struct kunit *test)
-{
-        int count = 0;
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+        {0, 0, 0, 0, 0, false, false, false},
+        {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+        {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+        {0, 0, 0, 0, 0, false, false, false},
+        {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+        {0, 0, 0, 0, 0, false, false, false},
+        {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+        {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+        {0, 0, 0, 0, 0, false, false, false},
+        {1, 2, 3, U8_MAX, 2, false, true, false},
+        {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
 
 /* Args are: value, shift, type, expected result, overflow expected */
 #define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
@@ -292,6 +327,10 @@ static void overflow_shift_test(struct kunit *test)
         count++; \
 } while (0)
 
+static void shift_sane_test(struct kunit *test)
+{
+        int count = 0;
+
         /* Sane shifts. */
         TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
         TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
@@ -334,6 +373,13 @@ static void overflow_shift_test(struct kunit *test)
         TEST_ONE_SHIFT(0, 30, s32, 0, false);
         TEST_ONE_SHIFT(0, 62, s64, 0, false);
 
+        kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+        int count = 0;
+
         /* Overflow: shifted the bit off the end. */
         TEST_ONE_SHIFT(1, 8, u8, 0, true);
         TEST_ONE_SHIFT(1, 16, u16, 0, true);
@@ -381,6 +427,13 @@ static void overflow_shift_test(struct kunit *test)
         /* 0100000100001000001000000010000001000010000001000100010001001011 */
         TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
 
+        kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+        int count = 0;
+
         /* Overflow: values larger than destination type. */
         TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
         TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
@@ -392,6 +445,33 @@ static void overflow_shift_test(struct kunit *test)
         TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
         TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
 
+        /* Overflow: shifted at or beyond entire type's bit width. */
+        TEST_ONE_SHIFT(0, 8, u8, 0, true);
+        TEST_ONE_SHIFT(0, 9, u8, 0, true);
+        TEST_ONE_SHIFT(0, 8, s8, 0, true);
+        TEST_ONE_SHIFT(0, 9, s8, 0, true);
+        TEST_ONE_SHIFT(0, 16, u16, 0, true);
+        TEST_ONE_SHIFT(0, 17, u16, 0, true);
+        TEST_ONE_SHIFT(0, 16, s16, 0, true);
+        TEST_ONE_SHIFT(0, 17, s16, 0, true);
+        TEST_ONE_SHIFT(0, 32, u32, 0, true);
+        TEST_ONE_SHIFT(0, 33, u32, 0, true);
+        TEST_ONE_SHIFT(0, 32, int, 0, true);
+        TEST_ONE_SHIFT(0, 33, int, 0, true);
+        TEST_ONE_SHIFT(0, 32, s32, 0, true);
+        TEST_ONE_SHIFT(0, 33, s32, 0, true);
+        TEST_ONE_SHIFT(0, 64, u64, 0, true);
+        TEST_ONE_SHIFT(0, 65, u64, 0, true);
+        TEST_ONE_SHIFT(0, 64, s64, 0, true);
+        TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+        kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+        int count = 0;
+
         /* Nonsense: negative initial value. */
         TEST_ONE_SHIFT(-1, 0, s8, 0, true);
         TEST_ONE_SHIFT(-1, 0, u8, 0, true);
@@ -416,26 +496,6 @@ static void overflow_shift_test(struct kunit *test)
         TEST_ONE_SHIFT(0, -30, s64, 0, true);
         TEST_ONE_SHIFT(0, -30, u64, 0, true);
 
-        /* Overflow: shifted at or beyond entire type's bit width. */
-        TEST_ONE_SHIFT(0, 8, u8, 0, true);
-        TEST_ONE_SHIFT(0, 9, u8, 0, true);
-        TEST_ONE_SHIFT(0, 8, s8, 0, true);
-        TEST_ONE_SHIFT(0, 9, s8, 0, true);
-        TEST_ONE_SHIFT(0, 16, u16, 0, true);
-        TEST_ONE_SHIFT(0, 17, u16, 0, true);
-        TEST_ONE_SHIFT(0, 16, s16, 0, true);
-        TEST_ONE_SHIFT(0, 17, s16, 0, true);
-        TEST_ONE_SHIFT(0, 32, u32, 0, true);
-        TEST_ONE_SHIFT(0, 33, u32, 0, true);
-        TEST_ONE_SHIFT(0, 32, int, 0, true);
-        TEST_ONE_SHIFT(0, 33, int, 0, true);
-        TEST_ONE_SHIFT(0, 32, s32, 0, true);
-        TEST_ONE_SHIFT(0, 33, s32, 0, true);
-        TEST_ONE_SHIFT(0, 64, u64, 0, true);
-        TEST_ONE_SHIFT(0, 65, u64, 0, true);
-        TEST_ONE_SHIFT(0, 64, s64, 0, true);
-        TEST_ONE_SHIFT(0, 65, s64, 0, true);
-
         /*
          * Corner case: for unsigned types, we fail when we've shifted
          * through the entire width of bits. For signed types, we might
@@ -451,9 +511,9 @@ static void overflow_shift_test(struct kunit *test)
         TEST_ONE_SHIFT(0, 31, s32, 0, false);
         TEST_ONE_SHIFT(0, 63, s64, 0, false);
 
-        kunit_info(test, "%d shift tests finished\n", count);
-#undef TEST_ONE_SHIFT
+        kunit_info(test, "%d nonsense shift tests finished\n", count);
 }
+#undef TEST_ONE_SHIFT
 
 /*
  * Deal with the various forms of allocator arguments. See comments above
@@ -649,18 +709,25 @@ static void overflow_size_helpers_test(struct kunit *test)
 }
 
 static struct kunit_case overflow_test_cases[] = {
-        KUNIT_CASE(u8_overflow_test),
-        KUNIT_CASE(s8_overflow_test),
-        KUNIT_CASE(u16_overflow_test),
-        KUNIT_CASE(s16_overflow_test),
-        KUNIT_CASE(u32_overflow_test),
-        KUNIT_CASE(s32_overflow_test),
+        KUNIT_CASE(u8_u8__u8_overflow_test),
+        KUNIT_CASE(s8_s8__s8_overflow_test),
+        KUNIT_CASE(u16_u16__u16_overflow_test),
+        KUNIT_CASE(s16_s16__s16_overflow_test),
+        KUNIT_CASE(u32_u32__u32_overflow_test),
+        KUNIT_CASE(s32_s32__s32_overflow_test),
         /* Clang 13 and earlier generate unwanted libcalls on 32-bit. */
 #if BITS_PER_LONG == 64
-        KUNIT_CASE(u64_overflow_test),
-        KUNIT_CASE(s64_overflow_test),
+        KUNIT_CASE(u64_u64__u64_overflow_test),
+        KUNIT_CASE(s64_s64__s64_overflow_test),
 #endif
-        KUNIT_CASE(overflow_shift_test),
+        KUNIT_CASE(u32_u32__u8_overflow_test),
+        KUNIT_CASE(u32_u32__int_overflow_test),
+        KUNIT_CASE(u8_u8__int_overflow_test),
+        KUNIT_CASE(int_int__u8_overflow_test),
+        KUNIT_CASE(shift_sane_test),
+        KUNIT_CASE(shift_overflow_test),
+        KUNIT_CASE(shift_truncate_test),
+        KUNIT_CASE(shift_nonsense_test),
         KUNIT_CASE(overflow_allocation_test),
         KUNIT_CASE(overflow_size_helpers_test),
         {}
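The typed arrays and DEFINE_TEST_FUNC_TYPED() cases added above exercise the
relaxed type checking mentioned in the pull summary: the two operands and the
destination of the check_*_overflow() helpers no longer have to share one
type. A minimal sketch of the pattern the u32_u32__u8 cases cover (function
and variable names are illustrative):

    #include <linux/errno.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    static int sum_to_u8(u32 a, u32 b, u8 *out)
    {
            u8 result;

            /*
             * check_add_overflow() accepts u32 operands with a u8 result;
             * it reports overflow when the true sum does not fit in the
             * destination type (e.g. 255 + 2 overflows and stores 1, per
             * the test data above).
             */
            if (check_add_overflow(a, b, &result))
                    return -ERANGE;

            *out = result;
            return 0;
    }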