| author    | Jiri Kosina <jkosina@suse.cz>                 | 2020-12-16 11:41:05 +0100 |
|-----------|-----------------------------------------------|---------------------------|
| committer | Jiri Kosina <jkosina@suse.cz>                 | 2020-12-16 11:41:05 +0100 |
| commit    | e77bc7dc9af0ec53996367b2053dfafee83b7edb      |                           |
| tree      | 7850cb0cc9e0d7308992b2b983052c5f209245bd /lib |                           |
| parent    | 105856b36c0cefc2fa1c1e649d75da71e2e38c31      |                           |
| parent    | 82514ecd61435c2d47c235e1343872b38db17be4      |                           |
Merge branch 'for-5.11/elecom' into for-linus
- support for EX-G M-XGL20DLBK device, from YOSHIOKA Takuma
Diffstat (limited to 'lib')
52 files changed, 1270 insertions, 494 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 491789a793ae..c789b39ed527 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -212,9 +212,10 @@ config DEBUG_INFO If unsure, say N. +if DEBUG_INFO + config DEBUG_INFO_REDUCED bool "Reduce debugging information" - depends on DEBUG_INFO help If you say Y here gcc is instructed to generate less debugging information for structure types. This means that tools that @@ -227,7 +228,6 @@ config DEBUG_INFO_REDUCED config DEBUG_INFO_COMPRESSED bool "Compressed debugging information" - depends on DEBUG_INFO depends on $(cc-option,-gz=zlib) depends on $(ld-option,--compress-debug-sections=zlib) help @@ -243,7 +243,6 @@ config DEBUG_INFO_COMPRESSED config DEBUG_INFO_SPLIT bool "Produce split debuginfo in .dwo files" - depends on DEBUG_INFO depends on $(cc-option,-gsplit-dwarf) help Generate debug info into separate .dwo files. This significantly @@ -259,7 +258,6 @@ config DEBUG_INFO_SPLIT config DEBUG_INFO_DWARF4 bool "Generate dwarf4 debuginfo" - depends on DEBUG_INFO depends on $(cc-option,-gdwarf-4) help Generate dwarf4 debug info. This requires recent versions @@ -269,7 +267,6 @@ config DEBUG_INFO_DWARF4 config DEBUG_INFO_BTF bool "Generate BTF typeinfo" - depends on DEBUG_INFO depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST help @@ -279,7 +276,6 @@ config DEBUG_INFO_BTF config GDB_SCRIPTS bool "Provide GDB scripts for kernel debugging" - depends on DEBUG_INFO help This creates the required links to GDB helper scripts in the build directory. If you load vmlinux into gdb, the helper @@ -288,6 +284,8 @@ config GDB_SCRIPTS instance. See Documentation/dev-tools/gdb-kernel-debugging.rst for further details. +endif # DEBUG_INFO + config ENABLE_MUST_CHECK bool "Enable __must_check logic" default y @@ -1367,6 +1365,27 @@ config WW_MUTEX_SELFTEST Say M if you want these self tests to build as a module. Say N if you are unsure. +config SCF_TORTURE_TEST + tristate "torture tests for smp_call_function*()" + depends on DEBUG_KERNEL + select TORTURE_TEST + help + This option provides a kernel module that runs torture tests + on the smp_call_function() family of primitives. The kernel + module may be built after the fact on the running kernel to + be tested, if desired. + +config CSD_LOCK_WAIT_DEBUG + bool "Debugging for csd_lock_wait(), called from smp_call_function*()" + depends on DEBUG_KERNEL + depends on 64BIT + default n + help + This option enables debug prints when CPUs are slow to respond + to the smp_call_function*() IPI wrappers. These debug prints + include the IPI handler function currently executing (if any) + and relevant stack traces. + endmenu # lock debugging config TRACE_IRQFLAGS @@ -1768,6 +1787,13 @@ config FAIL_PAGE_ALLOC help Provide fault-injection capability for alloc_pages(). +config FAULT_INJECTION_USERCOPY + bool "Fault injection capability for usercopy functions" + depends on FAULT_INJECTION + help + Provides fault-injection capability to inject failures + in usercopy functions (copy_from_user(), get_user(), ...). + config FAIL_MAKE_REQUEST bool "Fault-injection capability for disk IO" depends on FAULT_INJECTION && BLOCK @@ -2035,13 +2061,6 @@ config TEST_BITMAP If unsure, say N. -config TEST_BITFIELD - tristate "Test bitfield functions at runtime" - help - Enable this option to test the bitfield functions at boot. - - If unsure, say N. 
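The two lock-debugging options added above both target the smp_call_function*() family. As a rough illustration (a minimal sketch, not taken from this commit; the handler and its caller are hypothetical), this is the shape of cross-CPU call that SCF_TORTURE_TEST exercises, and the wait=1 case is exactly where CSD_LOCK_WAIT_DEBUG's prints fire when a target CPU is slow to run the handler:

```c
#include <linux/smp.h>

/* Hypothetical handler: runs in IPI context on the target CPU. */
static void scf_demo_handler(void *info)
{
	int *counter = info;

	(*counter)++;
}

static int scf_demo(int target_cpu)
{
	int counter = 0;

	/* wait=1: spin in csd_lock_wait() until 'target_cpu' has run
	 * the handler -- the wait that CSD_LOCK_WAIT_DEBUG instruments. */
	return smp_call_function_single(target_cpu, scf_demo_handler,
					&counter, 1);
}
```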
- config TEST_UUID tristate "Test functions located in the uuid module at runtime" @@ -2191,6 +2210,22 @@ config TEST_SYSCTL If unsure, say N. +config BITFIELD_KUNIT + tristate "KUnit test bitfield functions at runtime" + depends on KUNIT + help + Enable this option to test the bitfield functions at boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config SYSCTL_KUNIT_TEST tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2411,4 +2446,6 @@ config HYPERV_TESTING endmenu # "Kernel Testing and Coverage" +source "Documentation/Kconfig" + endmenu # Kernel hacking diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 256f2486f9bd..05dae05b6cc9 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -24,6 +24,21 @@ menuconfig KGDB if KGDB +config KGDB_HONOUR_BLOCKLIST + bool "KGDB: use kprobe blocklist to prohibit unsafe breakpoints" + depends on HAVE_KPROBES + depends on MODULES + select KPROBES + default y + help + If set to Y the debug core will use the kprobe blocklist to + identify symbols where it is unsafe to set breakpoints. + In particular this disallows instrumentation of functions + called during debug trap handling and thus makes it very + difficult to inadvertently provoke recursive trap handling. + + If unsure, say Y. + config KGDB_SERIAL_CONSOLE tristate "KGDB: use kgdb over the serial console" select CONSOLE_POLL diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 774315de555a..58f8d03d037b 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -47,6 +47,20 @@ config UBSAN_BOUNDS to the {str,mem}*cpy() family of functions (that is addressed by CONFIG_FORTIFY_SOURCE). +config UBSAN_LOCAL_BOUNDS + bool "Perform array local bounds checking" + depends on UBSAN_TRAP + depends on CC_IS_CLANG + depends on !UBSAN_KCOV_BROKEN + help + This option enables -fsanitize=local-bounds which traps when an + exception/error is detected. Therefore, it should be enabled only + if trapping is expected. + Enabling this option detects errors due to accesses through a + pointer that is derived from an object of a statically-known size, + where an added offset (which may not be known statically) is + out-of-bounds. 
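UBSAN_LOCAL_BOUNDS, added above, only covers accesses through pointers derived from objects of statically-known size. A minimal sketch of the bug class it traps (the function below is a hypothetical demo, not part of the patch): with CC_IS_CLANG and UBSAN_TRAP, an out-of-range index here hits a trap instruction instead of silently reading past the array:

```c
/* Hypothetical demo of what -fsanitize=local-bounds instruments. */
static int local_bounds_demo(unsigned int idx)
{
	int table[8] = { 0 };

	/* The size of 'table' is statically known; 'idx' is not.
	 * With local-bounds enabled, idx >= 8 traps rather than
	 * reading past the object. */
	return table[idx];
}
```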
+ config UBSAN_MISC bool "Enable all other Undefined Behavior sanity checks" default UBSAN diff --git a/lib/Makefile b/lib/Makefile index 49a2a9e36224..ce45af50983a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -87,7 +87,6 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o -obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o obj-$(CONFIG_TEST_UUID) += test_uuid.o obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o @@ -210,6 +209,7 @@ obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o +obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o @@ -348,6 +348,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o obj-$(CONFIG_PLDMFW) += pldmfw/ # KUnit tests +obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o obj-$(CONFIG_BITS_TEST) += test_bits.o diff --git a/lib/test_bitfield.c b/lib/bitfield_kunit.c index 5b8f4108662d..1473d8b4bf0f 100644 --- a/lib/test_bitfield.c +++ b/lib/bitfield_kunit.c @@ -5,8 +5,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> +#include <kunit/test.h> #include <linux/bitfield.h> #define CHECK_ENC_GET_U(tp, v, field, res) do { \ @@ -14,13 +13,11 @@ u##tp _res; \ \ _res = u##tp##_encode_bits(v, field); \ - if (_res != res) { \ - pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\ - (u64)_res); \ - return -EINVAL; \ - } \ - if (u##tp##_get_bits(_res, field) != v) \ - return -EINVAL; \ + KUNIT_ASSERT_FALSE_MSG(context, _res != res, \ + "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \ + (u64)_res); \ + KUNIT_ASSERT_FALSE(context, \ + u##tp##_get_bits(_res, field) != v); \ } \ } while (0) @@ -29,14 +26,13 @@ __le##tp _res; \ \ _res = le##tp##_encode_bits(v, field); \ - if (_res != cpu_to_le##tp(res)) { \ - pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\ - (u64)le##tp##_to_cpu(_res), \ - (u64)(res)); \ - return -EINVAL; \ - } \ - if (le##tp##_get_bits(_res, field) != v) \ - return -EINVAL; \ + KUNIT_ASSERT_FALSE_MSG(context, \ + _res != cpu_to_le##tp(res), \ + "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\ + (u64)le##tp##_to_cpu(_res), \ + (u64)(res)); \ + KUNIT_ASSERT_FALSE(context, \ + le##tp##_get_bits(_res, field) != v);\ } \ } while (0) @@ -45,14 +41,13 @@ __be##tp _res; \ \ _res = be##tp##_encode_bits(v, field); \ - if (_res != cpu_to_be##tp(res)) { \ - pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\ - (u64)be##tp##_to_cpu(_res), \ - (u64)(res)); \ - return -EINVAL; \ - } \ - if (be##tp##_get_bits(_res, field) != v) \ - return -EINVAL; \ + KUNIT_ASSERT_FALSE_MSG(context, \ + _res != cpu_to_be##tp(res), \ + "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \ + (u64)be##tp##_to_cpu(_res), \ + (u64)(res)); \ + KUNIT_ASSERT_FALSE(context, \ + be##tp##_get_bits(_res, field) != v);\ } \ } while (0) @@ -62,7 +57,7 @@ CHECK_ENC_GET_BE(tp, v, field, res); \ } while (0) -static int test_constants(void) +static void __init 
test_bitfields_constants(struct kunit *context) { /* * NOTE @@ -95,19 +90,17 @@ static int test_constants(void) CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull); CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull); CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull); - - return 0; } #define CHECK(tp, mask) do { \ u64 v; \ \ for (v = 0; v < 1 << hweight32(mask); v++) \ - if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \ - return -EINVAL; \ + KUNIT_ASSERT_FALSE(context, \ + tp##_encode_bits(v, mask) != v << __ffs64(mask));\ } while (0) -static int test_variables(void) +static void __init test_bitfields_variables(struct kunit *context) { CHECK(u8, 0x0f); CHECK(u8, 0xf0); @@ -130,39 +123,32 @@ static int test_variables(void) CHECK(u64, 0x000000007f000000ull); CHECK(u64, 0x0000000018000000ull); CHECK(u64, 0x0000001f8000000ull); - - return 0; } -static int __init test_bitfields(void) -{ - int ret = test_constants(); - - if (ret) { - pr_warn("constant tests failed!\n"); - return ret; - } - - ret = test_variables(); - if (ret) { - pr_warn("variable tests failed!\n"); - return ret; - } - #ifdef TEST_BITFIELD_COMPILE +static void __init test_bitfields_compile(struct kunit *context) +{ /* these should fail compilation */ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000); u32_encode_bits(7, 0x06000000); /* this should at least give a warning */ u16_encode_bits(0, 0x60000); +} #endif - pr_info("tests passed\n"); +static struct kunit_case __refdata bitfields_test_cases[] = { + KUNIT_CASE(test_bitfields_constants), + KUNIT_CASE(test_bitfields_variables), + {} +}; - return 0; -} -module_init(test_bitfields) +static struct kunit_suite bitfields_test_suite = { + .name = "bitfields", + .test_cases = bitfields_test_cases, +}; + +kunit_test_suites(&bitfields_test_suite); MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); MODULE_LICENSE("GPL"); diff --git a/lib/bitmap.c b/lib/bitmap.c index c13d859bc7ab..75006c4036e9 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -23,7 +23,7 @@ /** * DOC: bitmap introduction * - * bitmaps provide an array of bits, implemented using an an + * bitmaps provide an array of bits, implemented using an * array of unsigned longs. The number of valid bits in a * given bitmap does _not_ need to be an exact multiple of * BITS_PER_LONG. @@ -552,7 +552,7 @@ static inline bool end_of_region(char c) } /* - * The format allows commas and whitespases at the beginning + * The format allows commas and whitespaces at the beginning * of the region. 
*/ static const char *bitmap_find_region(const char *str) diff --git a/lib/crc32.c b/lib/crc32.c index 35a03d03f973..2a68dfd3b96c 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, return crc; } -#if CRC_LE_BITS == 1 +#if CRC_BE_BITS == 1 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); diff --git a/lib/crc32test.c b/lib/crc32test.c index 97d6a57cefcc..61ddce2cff77 100644 --- a/lib/crc32test.c +++ b/lib/crc32test.c @@ -683,7 +683,6 @@ static int __init crc32c_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -694,7 +693,6 @@ static int __init crc32c_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); @@ -768,7 +766,6 @@ static int __init crc32_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -783,7 +780,6 @@ static int __init crc32_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", CRC_LE_BITS, CRC_BE_BITS); diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index f9628f3924ce..c72c865032fa 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -390,7 +390,7 @@ static int INIT get_next_block(struct bunzip_data *bd) j = (bd->inbufBits >> bd->inbufBitCount)& ((1 << hufGroup->maxLen)-1); got_huff_bits: - /* Figure how how many bits are in next symbol and + /* Figure how many bits are in next symbol and * unget extras */ i = hufGroup->minLen; while (j > limit[i]) diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c index 0ad2c15479ed..790abc472f5b 100644 --- a/lib/decompress_unzstd.c +++ b/lib/decompress_unzstd.c @@ -178,8 +178,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len, int err; size_t ret; + /* + * ZSTD decompression code won't be happy if the buffer size is so big + * that its end address overflows. When the size is not provided, make + * it as big as possible without having the end address overflow. + */ if (out_len == 0) - out_len = LONG_MAX; /* no limit */ + out_len = UINTPTR_MAX - (uintptr_t)out_buf; if (fill == NULL && flush == NULL) /* diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index e659a027036e..fde0aa244148 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -60,8 +60,8 @@ void dql_completed(struct dql *dql, unsigned int count) * A decrease is only considered if the queue has been busy in * the whole interval (the check above). * - * If there is slack, the amount of execess data queued above - * the the amount needed to prevent starvation, the queue limit + * If there is slack, the amount of excess data queued above + * the amount needed to prevent starvation, the queue limit * can be decreased. To avoid hysteresis we consider the * minimum amount of slack found over several iterations of the * completion routine. 
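The decompress_unzstd hunk above replaces the LONG_MAX "no limit" value with a cap derived from the buffer's end address. A standalone sketch of the arithmetic (a user-space demo assuming only <stdint.h>, not kernel code): if out_buf + out_len wraps past the top of the address space, end-of-buffer pointer comparisons inside the decompressor misbehave, so the unlimited case is capped at the largest length whose end address still fits:

```c
#include <stdint.h>

/* Largest usable length for a buffer starting at 'out_buf' such that
 * out_buf + len does not overflow the address space. */
static uintptr_t safe_out_len(const unsigned char *out_buf, uintptr_t out_len)
{
	if (out_len == 0)	/* caller asked for "no limit" */
		out_len = UINTPTR_MAX - (uintptr_t)out_buf;
	return out_len;
}
```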
diff --git a/lib/earlycpio.c b/lib/earlycpio.c index c001e084829e..e83628882001 100644 --- a/lib/earlycpio.c +++ b/lib/earlycpio.c @@ -42,7 +42,7 @@ enum cpio_fields { /** * cpio_data find_cpio_data - Search for files in an uncompressed cpio * @path: The directory to search for, including a slash at the end - * @data: Pointer to the the cpio archive or a header inside + * @data: Pointer to the cpio archive or a header inside * @len: Remaining length of the cpio based on data pointer * @nextoff: When a matching file is found, this is the offset from the * beginning of the cpio to the beginning of the next file, not the diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c new file mode 100644 index 000000000000..77558b6c29ca --- /dev/null +++ b/lib/fault-inject-usercopy.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/fault-inject.h> +#include <linux/fault-inject-usercopy.h> + +static struct { + struct fault_attr attr; +} fail_usercopy = { + .attr = FAULT_ATTR_INITIALIZER, +}; + +static int __init setup_fail_usercopy(char *str) +{ + return setup_fault_attr(&fail_usercopy.attr, str); +} +__setup("fail_usercopy=", setup_fail_usercopy); + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +static int __init fail_usercopy_debugfs(void) +{ + struct dentry *dir; + + dir = fault_create_debugfs_attr("fail_usercopy", NULL, + &fail_usercopy.attr); + if (IS_ERR(dir)) + return PTR_ERR(dir); + + return 0; +} + +late_initcall(fail_usercopy_debugfs); + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +bool should_fail_usercopy(void) +{ + return should_fail(&fail_usercopy.attr, 1); +} +EXPORT_SYMBOL_GPL(should_fail_usercopy); diff --git a/lib/find_bit.c b/lib/find_bit.c index 49f875f1baf7..4a8751010d59 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -16,6 +16,7 @@ #include <linux/bitmap.h> #include <linux/export.h> #include <linux/kernel.h> +#include <linux/minmax.h> #if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \ diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c index 0e2deac97da0..e02f9df24d1e 100644 --- a/lib/fonts/font_10x18.c +++ b/lib/fonts/font_10x18.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 9216 -static struct font_data fontdata_10x18 = { +static const struct font_data fontdata_10x18 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c index 87da8acd07db..6e3c4b7691c8 100644 --- a/lib/fonts/font_6x10.c +++ b/lib/fonts/font_6x10.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 2560 -static struct font_data fontdata_6x10 = { +static const struct font_data fontdata_6x10 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c index 5e975dfa10a5..2d22a24e816f 100644 --- a/lib/fonts/font_6x11.c +++ b/lib/fonts/font_6x11.c @@ -9,7 +9,7 @@ #define FONTDATAMAX (11*256) -static struct font_data fontdata_6x11 = { +static const struct font_data fontdata_6x11 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c index e06447788418..e7442a0d183d 100644 --- a/lib/fonts/font_6x8.c +++ b/lib/fonts/font_6x8.c @@ -3,8 +3,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_6x8[FONTDATAMAX] = { - +static const struct font_data fontdata_6x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 000000 */ 0x00, /* 000000 */ @@ -2564,13 +2564,13 @@ static 
const unsigned char fontdata_6x8[FONTDATAMAX] = { 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ -}; +} }; const struct font_desc font_6x8 = { .idx = FONT6x8_IDX, .name = "6x8", .width = 6, .height = 8, - .data = fontdata_6x8, + .data = fontdata_6x8.data, .pref = 0, }; diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c index 86d298f38505..9cc7ae2e03f7 100644 --- a/lib/fonts/font_7x14.c +++ b/lib/fonts/font_7x14.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 3584 -static struct font_data fontdata_7x14 = { +static const struct font_data fontdata_7x14 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c index 37cedd36ca5e..bab25dc59e8d 100644 --- a/lib/fonts/font_8x16.c +++ b/lib/fonts/font_8x16.c @@ -10,7 +10,7 @@ #define FONTDATAMAX 4096 -static struct font_data fontdata_8x16 = { +static const struct font_data fontdata_8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c index 8ab695538395..109d0572368f 100644 --- a/lib/fonts/font_8x8.c +++ b/lib/fonts/font_8x8.c @@ -9,7 +9,7 @@ #define FONTDATAMAX 2048 -static struct font_data fontdata_8x8 = { +static const struct font_data fontdata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c index 069b3e80c434..fb395f0d4031 100644 --- a/lib/fonts/font_acorn_8x8.c +++ b/lib/fonts/font_acorn_8x8.c @@ -5,7 +5,7 @@ #define FONTDATAMAX 2048 -static struct font_data acorndata_8x8 = { +static const struct font_data acorndata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c index 1449876c6a27..592774a90917 100644 --- a/lib/fonts/font_mini_4x6.c +++ b/lib/fonts/font_mini_4x6.c @@ -43,7 +43,7 @@ __END__; #define FONTDATAMAX 1536 -static struct font_data fontdata_mini_4x6 = { +static const struct font_data fontdata_mini_4x6 = { { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c index 32d65551e7ed..a6f95ebce950 100644 --- a/lib/fonts/font_pearl_8x8.c +++ b/lib/fonts/font_pearl_8x8.c @@ -14,7 +14,7 @@ #define FONTDATAMAX 2048 -static struct font_data fontdata_pearl8x8 = { +static const struct font_data fontdata_pearl8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c index 641a6b4dca42..a5b65bd49604 100644 --- a/lib/fonts/font_sun12x22.c +++ b/lib/fonts/font_sun12x22.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 11264 -static struct font_data fontdata_sun12x22 = { +static const struct font_data fontdata_sun12x22 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c index 193fe6d988e0..e577e76a6a7c 100644 --- a/lib/fonts/font_sun8x16.c +++ b/lib/fonts/font_sun8x16.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 4096 -static struct font_data fontdata_sun8x16 = { +static const struct font_data fontdata_sun8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c index 
91b9c283bd9c..f7c3abb6b99e 100644 --- a/lib/fonts/font_ter16x32.c +++ b/lib/fonts/font_ter16x32.c @@ -4,7 +4,7 @@ #define FONTDATAMAX 16384 -static struct font_data fontdata_ter16x32 = { +static const struct font_data fontdata_ter16x32 = { { 0, 0, FONTDATAMAX, 0 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, diff --git a/lib/hexdump.c b/lib/hexdump.c index 147133f8eb2f..9301578f98e8 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -7,6 +7,7 @@ #include <linux/ctype.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/minmax.h> #include <linux/export.h> #include <asm/unaligned.h> diff --git a/lib/idr.c b/lib/idr.c index c2cf2c52bbde..f4ab4f4aa3c7 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -372,7 +372,8 @@ EXPORT_SYMBOL(idr_replace); * Allocate an ID between @min and @max, inclusive. The allocated ID will * not exceed %INT_MAX, even if @max is larger. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, * or %-ENOSPC if there are no free IDs. */ @@ -470,6 +471,7 @@ alloc: goto retry; nospc: xas_unlock_irqrestore(&xas, flags); + kfree(alloc); return -ENOSPC; } EXPORT_SYMBOL(ida_alloc_range); @@ -479,7 +481,8 @@ EXPORT_SYMBOL(ida_alloc_range); * @ida: IDA handle. * @id: Previously allocated ID. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. */ void ida_free(struct ida *ida, unsigned int id) { @@ -531,7 +534,8 @@ EXPORT_SYMBOL(ida_free); * or freed. If the IDA is already empty, there is no need to call this * function. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. */ void ida_destroy(struct ida *ida) { diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 14cae2584db4..1635111c5bd2 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -2,6 +2,7 @@ #include <crypto/hash.h> #include <linux/export.h> #include <linux/bvec.h> +#include <linux/fault-inject-usercopy.h> #include <linux/uio.h> #include <linux/pagemap.h> #include <linux/slab.h> @@ -140,6 +141,8 @@ static int copyout(void __user *to, const void *from, size_t n) { + if (should_fail_usercopy()) + return n; if (access_ok(to, n)) { instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); @@ -149,6 +152,8 @@ static int copyout(void __user *to, const void *from, size_t n) static int copyin(void *to, const void __user *from, size_t n) { + if (should_fail_usercopy()) + return n; if (access_ok(from, n)) { instrument_copy_from_user(to, from, n); n = raw_copy_from_user(to, from, n); diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile index 724b94311ca3..c49f4ffb6273 100644 --- a/lib/kunit/Makefile +++ b/lib/kunit/Makefile @@ -3,7 +3,8 @@ obj-$(CONFIG_KUNIT) += kunit.o kunit-objs += test.o \ string-stream.o \ assert.o \ - try-catch.o + try-catch.o \ + executor.o ifeq ($(CONFIG_KUNIT_DEBUGFS),y) kunit-objs += debugfs.o diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c new file mode 100644 index 000000000000..a95742a4ece7 --- /dev/null +++ b/lib/kunit/executor.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <kunit/test.h> + +/* + * These symbols point to the .kunit_test_suites section and are defined in + * include/asm-generic/vmlinux.lds.h, and consequently must be extern. 
+ */ +extern struct kunit_suite * const * const __kunit_suites_start[]; +extern struct kunit_suite * const * const __kunit_suites_end[]; + +#if IS_BUILTIN(CONFIG_KUNIT) + +static void kunit_print_tap_header(void) +{ + struct kunit_suite * const * const *suites, * const *subsuite; + int num_of_suites = 0; + + for (suites = __kunit_suites_start; + suites < __kunit_suites_end; + suites++) + for (subsuite = *suites; *subsuite != NULL; subsuite++) + num_of_suites++; + + pr_info("TAP version 14\n"); + pr_info("1..%d\n", num_of_suites); +} + +int kunit_run_all_tests(void) +{ + struct kunit_suite * const * const *suites; + + kunit_print_tap_header(); + + for (suites = __kunit_suites_start; + suites < __kunit_suites_end; + suites++) + __kunit_test_suites_init(*suites); + + return 0; +} + +#endif /* IS_BUILTIN(CONFIG_KUNIT) */ diff --git a/lib/kunit/test.c b/lib/kunit/test.c index dcc35fd30d95..750704abe89a 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -16,16 +16,6 @@ #include "string-stream.h" #include "try-catch-impl.h" -static void kunit_print_tap_version(void) -{ - static bool kunit_has_printed_tap_version; - - if (!kunit_has_printed_tap_version) { - pr_info("TAP version 14\n"); - kunit_has_printed_tap_version = true; - } -} - /* * Append formatted message to log, size of which is limited to * KUNIT_LOG_SIZE bytes (including null terminating byte). @@ -65,7 +55,6 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases); static void kunit_print_subtest_start(struct kunit_suite *suite) { - kunit_print_tap_version(); kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s", suite->name); kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd", @@ -381,7 +370,7 @@ static void kunit_init_suite(struct kunit_suite *suite) kunit_debugfs_create_suite(suite); } -int __kunit_test_suites_init(struct kunit_suite **suites) +int __kunit_test_suites_init(struct kunit_suite * const * const suites) { unsigned int i; diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 77ab839644c5..5ca0d815a95d 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c @@ -12,7 +12,7 @@ * pages = {}, * month = {June}, *} - * Used by the iSCSI driver, possibly others, and derived from the + * Used by the iSCSI driver, possibly others, and derived from * the iscsi-crc.c module of the linux-iscsi driver at * http://linux-iscsi.sourceforge.net. * diff --git a/lib/math/rational.c b/lib/math/rational.c index df75c8809693..9781d521963d 100644 --- a/lib/math/rational.c +++ b/lib/math/rational.c @@ -11,7 +11,7 @@ #include <linux/rational.h> #include <linux/compiler.h> #include <linux/export.h> -#include <linux/kernel.h> +#include <linux/minmax.h> /* * calculate best rational approximation for a given fraction diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c index bf043258fa00..32436dd4171e 100644 --- a/lib/math/reciprocal_div.c +++ b/lib/math/reciprocal_div.c @@ -4,6 +4,7 @@ #include <asm/div64.h> #include <linux/reciprocal_div.h> #include <linux/export.h> +#include <linux/minmax.h> /* * For a description of the algorithm please have a look at diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c index a5119a2bcdd4..142b680835df 100644 --- a/lib/mpi/mpi-bit.c +++ b/lib/mpi/mpi-bit.c @@ -1,4 +1,4 @@ -/* mpi-bit.c - MPI bit level fucntions +/* mpi-bit.c - MPI bit level functions * Copyright (C) 1998, 1999 Free Software Foundation, Inc. * * This file is part of GnuPG. 
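Returning to the KUnit executor introduced in lib/kunit/executor.c above: it depends on every kunit_test_suites() call site emitting a NULL-terminated array of suite pointers into the .kunit_test_suites linker section, which __kunit_suites_start/__kunit_suites_end then bracket. A rough analogue of that walk (the section attribute and demo suite below are illustrative assumptions, not the kernel's macros):

```c
struct kunit_suite { const char *name; };

/* Each registration site contributes one NULL-terminated array. */
static struct kunit_suite demo_suite = { .name = "demo" };
static struct kunit_suite * const demo_suites[]
	__attribute__((used, section(".kunit_test_suites"))) = {
	&demo_suite,
	NULL,
};

/* Provided by the linker script as start/end markers of the section. */
extern struct kunit_suite * const * const __kunit_suites_start[];
extern struct kunit_suite * const * const __kunit_suites_end[];

/* The executor's counting loop: outer over registered arrays,
 * inner over the suites within each array. */
static int count_registered_suites(void)
{
	struct kunit_suite * const * const *suites;
	struct kunit_suite * const *subsuite;
	int n = 0;

	for (suites = __kunit_suites_start;
	     suites < __kunit_suites_end; suites++)
		for (subsuite = *suites; *subsuite; subsuite++)
			n++;
	return n;	/* becomes the "1..%d" TAP plan line */
}
```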
diff --git a/lib/nlattr.c b/lib/nlattr.c index bc5b5cf608c4..74019c8ebf6b 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -96,8 +96,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype, continue; if (nla_len(entry) < NLA_HDRLEN) { - NL_SET_ERR_MSG_ATTR(extack, entry, - "Array element too short"); + NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy, + "Array element too short"); return -ERANGE; } @@ -124,6 +124,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt, range->max = U8_MAX; break; case NLA_U16: + case NLA_BINARY: range->max = U16_MAX; break; case NLA_U32: @@ -140,6 +141,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt, switch (pt->validation_type) { case NLA_VALIDATE_RANGE: + case NLA_VALIDATE_RANGE_WARN_TOO_LONG: range->min = pt->min; range->max = pt->max; break; @@ -157,9 +159,10 @@ void nla_get_range_unsigned(const struct nla_policy *pt, } } -static int nla_validate_int_range_unsigned(const struct nla_policy *pt, - const struct nlattr *nla, - struct netlink_ext_ack *extack) +static int nla_validate_range_unsigned(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack, + unsigned int validate) { struct netlink_range_validation range; u64 value; @@ -178,15 +181,39 @@ static int nla_validate_int_range_unsigned(const struct nla_policy *pt, case NLA_MSECS: value = nla_get_u64(nla); break; + case NLA_BINARY: + value = nla_len(nla); + break; default: return -EINVAL; } nla_get_range_unsigned(pt, &range); + if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG && + pt->type == NLA_BINARY && value > range.max) { + pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", + current->comm, pt->type); + if (validate & NL_VALIDATE_STRICT_ATTRS) { + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "invalid attribute length"); + return -EINVAL; + } + + /* this assumes min <= max (don't validate against min) */ + return 0; + } + if (value < range.min || value > range.max) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "integer out of range"); + bool binary = pt->type == NLA_BINARY; + + if (binary) + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "binary attribute size out of range"); + else + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "integer out of range"); + return -ERANGE; } @@ -264,8 +291,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt, nla_get_range_signed(pt, &range); if (value < range.min || value > range.max) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "integer out of range"); + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "integer out of range"); return -ERANGE; } @@ -274,7 +301,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt, static int nla_validate_int_range(const struct nla_policy *pt, const struct nlattr *nla, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + unsigned int validate) { switch (pt->type) { case NLA_U8: @@ -282,7 +310,8 @@ static int nla_validate_int_range(const struct nla_policy *pt, case NLA_U32: case NLA_U64: case NLA_MSECS: - return nla_validate_int_range_unsigned(pt, nla, extack); + case NLA_BINARY: + return nla_validate_range_unsigned(pt, nla, extack, validate); case NLA_S8: case NLA_S16: case NLA_S32: @@ -294,6 +323,37 @@ static int nla_validate_int_range(const struct nla_policy *pt, } } +static int nla_validate_mask(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + u64 value; + + switch (pt->type) { + case NLA_U8: + value = nla_get_u8(nla); + break; + case 
NLA_U16: + value = nla_get_u16(nla); + break; + case NLA_U32: + value = nla_get_u32(nla); + break; + case NLA_U64: + value = nla_get_u64(nla); + break; + default: + return -EINVAL; + } + + if (value & ~(u64)pt->mask) { + NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set"); + return -EINVAL; + } + + return 0; +} + static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, unsigned int depth) @@ -313,15 +373,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype, BUG_ON(pt->type > NLA_TYPE_MAX); - if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) || - (pt->type == NLA_EXACT_LEN && - pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG && - attrlen != pt->len)) { + if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, type); if (validate & NL_VALIDATE_STRICT_ATTRS) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "invalid attribute length"); + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "invalid attribute length"); return -EINVAL; } } @@ -329,14 +386,14 @@ static int validate_nla(const struct nlattr *nla, int maxtype, if (validate & NL_VALIDATE_NESTED) { if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) && !(nla->nla_type & NLA_F_NESTED)) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "NLA_F_NESTED is missing"); + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "NLA_F_NESTED is missing"); return -EINVAL; } if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY && pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "NLA_F_NESTED not expected"); + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "NLA_F_NESTED not expected"); return -EINVAL; } } @@ -449,19 +506,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype, "Unsupported attribute"); return -EINVAL; } - /* fall through */ - case NLA_MIN_LEN: if (attrlen < pt->len) goto out_err; break; - case NLA_EXACT_LEN: - if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) { - if (attrlen != pt->len) - goto out_err; - break; - } - /* fall through */ default: if (pt->len) minlen = pt->len; @@ -479,9 +527,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype, break; case NLA_VALIDATE_RANGE_PTR: case NLA_VALIDATE_RANGE: + case NLA_VALIDATE_RANGE_WARN_TOO_LONG: case NLA_VALIDATE_MIN: case NLA_VALIDATE_MAX: - err = nla_validate_int_range(pt, nla, extack); + err = nla_validate_int_range(pt, nla, extack, validate); + if (err) + return err; + break; + case NLA_VALIDATE_MASK: + err = nla_validate_mask(pt, nla, extack); if (err) return err; break; @@ -496,7 +550,8 @@ static int validate_nla(const struct nlattr *nla, int maxtype, return 0; out_err: - NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation"); + NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, + "Attribute failed policy validation"); return err; } @@ -816,8 +871,7 @@ EXPORT_SYMBOL(__nla_reserve); struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { - if (nla_need_padding_for_64bit(skb)) - nla_align_64bit(skb, padattr); + nla_align_64bit(skb, padattr); return __nla_reserve(skb, attrtype, attrlen); } diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 15ca78e1c7d4..8abe1870dba4 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -85,12 +85,16 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, put_cpu(); } +// Dump stacks even for idle 
CPUs. +static bool backtrace_idle; +module_param(backtrace_idle, bool, 0644); + bool nmi_cpu_backtrace(struct pt_regs *regs) { int cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { - if (regs && cpu_in_idle(instruction_pointer(regs))) { + if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) { pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n", cpu, (void *)instruction_pointer(regs)); } else { diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index f61689a96e85..00f666d94486 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -85,7 +85,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) preempt_disable(); count = __this_cpu_read(*fbc->counters) + amount; - if (count >= batch || count <= -batch) { + if (abs(count) >= batch) { unsigned long flags; raw_spin_lock_irqsave(&fbc->lock, flags); fbc->count += count; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8e4a3a4397f2..3a4da11b804d 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -20,7 +20,6 @@ #include <linux/kernel.h> #include <linux/kmemleak.h> #include <linux/percpu.h> -#include <linux/local_lock.h> #include <linux/preempt.h> /* in_interrupt() */ #include <linux/radix-tree.h> #include <linux/rcupdate.h> @@ -325,7 +324,7 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) int ret = -ENOMEM; /* - * Nodes preloaded by one cgroup can be be used by another cgroup, so + * Nodes preloaded by one cgroup can be used by another cgroup, so * they should never be accounted to any particular memory cgroup. */ gfp_mask &= ~__GFP_ACCOUNT; diff --git a/lib/random32.c b/lib/random32.c index dfb9981ab798..4d0e05e471d7 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -38,19 +38,10 @@ #include <linux/jiffies.h> #include <linux/random.h> #include <linux/sched.h> +#include <linux/bitops.h> #include <asm/unaligned.h> #include <trace/events/random.h> -#ifdef CONFIG_RANDOM32_SELFTEST -static void __init prandom_state_selftest(void); -#else -static inline void prandom_state_selftest(void) -{ -} -#endif - -DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; - /** * prandom_u32_state - seeded pseudo-random number generator. * @state: pointer to state structure holding seeded state. @@ -71,26 +62,6 @@ u32 prandom_u32_state(struct rnd_state *state) EXPORT_SYMBOL(prandom_u32_state); /** - * prandom_u32 - pseudo random number generator - * - * A 32 bit pseudo-random number is generated using a fast - * algorithm suitable for simulation. This algorithm is NOT - * considered safe for cryptographic use. - */ -u32 prandom_u32(void) -{ - struct rnd_state *state = &get_cpu_var(net_rand_state); - u32 res; - - res = prandom_u32_state(state); - trace_prandom_u32(res); - put_cpu_var(net_rand_state); - - return res; -} -EXPORT_SYMBOL(prandom_u32); - -/** * prandom_bytes_state - get the requested number of pseudo-random bytes * * @state: pointer to state structure holding seeded state. 
@@ -121,20 +92,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) } EXPORT_SYMBOL(prandom_bytes_state); -/** - * prandom_bytes - get the requested number of pseudo-random bytes - * @buf: where to copy the pseudo-random bytes to - * @bytes: the requested number of bytes - */ -void prandom_bytes(void *buf, size_t bytes) -{ - struct rnd_state *state = &get_cpu_var(net_rand_state); - - prandom_bytes_state(state, buf, bytes); - put_cpu_var(net_rand_state); -} -EXPORT_SYMBOL(prandom_bytes); - static void prandom_warmup(struct rnd_state *state) { /* Calling RNG ten times to satisfy recurrence condition */ @@ -150,96 +107,6 @@ static void prandom_warmup(struct rnd_state *state) prandom_u32_state(state); } -static u32 __extract_hwseed(void) -{ - unsigned int val = 0; - - (void)(arch_get_random_seed_int(&val) || - arch_get_random_int(&val)); - - return val; -} - -static void prandom_seed_early(struct rnd_state *state, u32 seed, - bool mix_with_hwseed) -{ -#define LCG(x) ((x) * 69069U) /* super-duper LCG */ -#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) - state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); - state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); - state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); - state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); -} - -/** - * prandom_seed - add entropy to pseudo random number generator - * @entropy: entropy value - * - * Add some additional entropy to the prandom pool. - */ -void prandom_seed(u32 entropy) -{ - int i; - /* - * No locking on the CPUs, but then somewhat random results are, well, - * expected. - */ - for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state, i); - - state->s1 = __seed(state->s1 ^ entropy, 2U); - prandom_warmup(state); - } -} -EXPORT_SYMBOL(prandom_seed); - -/* - * Generate some initially weak seeding values to allow - * to start the prandom_u32() engine. - */ -static int __init prandom_init(void) -{ - int i; - - prandom_state_selftest(); - - for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state, i); - u32 weak_seed = (i + jiffies) ^ random_get_entropy(); - - prandom_seed_early(state, weak_seed, true); - prandom_warmup(state); - } - - return 0; -} -core_initcall(prandom_init); - -static void __prandom_timer(struct timer_list *unused); - -static DEFINE_TIMER(seed_timer, __prandom_timer); - -static void __prandom_timer(struct timer_list *unused) -{ - u32 entropy; - unsigned long expires; - - get_random_bytes(&entropy, sizeof(entropy)); - prandom_seed(entropy); - - /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ - expires = 40 + prandom_u32_max(40); - seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); - - add_timer(&seed_timer); -} - -static void __init __prandom_start_seed_timer(void) -{ - seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); - add_timer(&seed_timer); -} - void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) { int i; @@ -259,51 +126,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) } EXPORT_SYMBOL(prandom_seed_full_state); -/* - * Generate better values after random number generator - * is fully initialized. - */ -static void __prandom_reseed(bool late) -{ - unsigned long flags; - static bool latch = false; - static DEFINE_SPINLOCK(lock); - - /* Asking for random bytes might result in bytes getting - * moved into the nonblocking pool and thus marking it - * as initialized. 
In this case we would double back into - * this function and attempt to do a late reseed. - * Ignore the pointless attempt to reseed again if we're - * already waiting for bytes when the nonblocking pool - * got initialized. - */ - - /* only allow initial seeding (late == false) once */ - if (!spin_trylock_irqsave(&lock, flags)) - return; - - if (latch && !late) - goto out; - - latch = true; - prandom_seed_full_state(&net_rand_state); -out: - spin_unlock_irqrestore(&lock, flags); -} - -void prandom_reseed_late(void) -{ - __prandom_reseed(true); -} - -static int __init prandom_reseed(void) -{ - __prandom_reseed(false); - __prandom_start_seed_timer(); - return 0; -} -late_initcall(prandom_reseed); - #ifdef CONFIG_RANDOM32_SELFTEST static struct prandom_test1 { u32 seed; @@ -423,7 +245,28 @@ static struct prandom_test2 { { 407983964U, 921U, 728767059U }, }; -static void __init prandom_state_selftest(void) +static u32 __extract_hwseed(void) +{ + unsigned int val = 0; + + (void)(arch_get_random_seed_int(&val) || + arch_get_random_int(&val)); + + return val; +} + +static void prandom_seed_early(struct rnd_state *state, u32 seed, + bool mix_with_hwseed) +{ +#define LCG(x) ((x) * 69069U) /* super-duper LCG */ +#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) + state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); + state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); + state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); + state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); +} + +static int __init prandom_state_selftest(void) { int i, j, errors = 0, runs = 0; bool error = false; @@ -463,5 +306,327 @@ static void __init prandom_state_selftest(void) pr_warn("prandom: %d/%d self tests failed\n", errors, runs); else pr_info("prandom: %d self tests passed\n", runs); + return 0; } +core_initcall(prandom_state_selftest); #endif + +/* + * The prandom_u32() implementation is now completely separate from the + * prandom_state() functions, which are retained (for now) for compatibility. + * + * Because of (ab)use in the networking code for choosing random TCP/UDP port + * numbers, which open DoS possibilities if guessable, we want something + * stronger than a standard PRNG. But the performance requirements of + * the network code do not allow robust crypto for this application. + * + * So this is a homebrew Junior Spaceman implementation, based on the + * lowest-latency trustworthy crypto primitive available, SipHash. + * (The authors of SipHash have not been consulted about this abuse of + * their work.) + * + * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to + * one word of output. This abbreviated version uses 2 rounds per word + * of output. + */ + +struct siprand_state { + unsigned long v0; + unsigned long v1; + unsigned long v2; + unsigned long v3; +}; + +static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(unsigned long, net_rand_noise); +EXPORT_PER_CPU_SYMBOL(net_rand_noise); + +/* + * This is the core CPRNG function. As "pseudorandom", this is not used + * for truly valuable things, just intended to be a PITA to guess. + * For maximum speed, we do just two SipHash rounds per word. This is + * the same rate as 4 rounds per 64 bits that SipHash normally uses, + * so hopefully it's reasonably secure. 
+ * + * There are two changes from the official SipHash finalization: + * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; + * they are there only to make the output rounds distinct from the input + * rounds, and this application has no input rounds. + * - Rather than returning v0^v1^v2^v3, return v1+v3. + * If you look at the SipHash round, the last operation on v3 is + * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. + * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but + * it still cancels out half of the bits in v2 for no benefit.) + * Second, since the last combining operation was xor, continue the + * pattern of alternating xor/add for a tiny bit of extra non-linearity. + */ +static inline u32 siprand_u32(struct siprand_state *s) +{ + unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; + unsigned long n = raw_cpu_read(net_rand_noise); + + v3 ^= n; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= n; + s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; + return v1 + v3; +} + + +/** + * prandom_u32 - pseudo random number generator + * + * A 32 bit pseudo-random number is generated using a fast + * algorithm suitable for simulation. This algorithm is NOT + * considered safe for cryptographic use. + */ +u32 prandom_u32(void) +{ + struct siprand_state *state = get_cpu_ptr(&net_rand_state); + u32 res = siprand_u32(state); + + trace_prandom_u32(res); + put_cpu_ptr(&net_rand_state); + return res; +} +EXPORT_SYMBOL(prandom_u32); + +/** + * prandom_bytes - get the requested number of pseudo-random bytes + * @buf: where to copy the pseudo-random bytes to + * @bytes: the requested number of bytes + */ +void prandom_bytes(void *buf, size_t bytes) +{ + struct siprand_state *state = get_cpu_ptr(&net_rand_state); + u8 *ptr = buf; + + while (bytes >= sizeof(u32)) { + put_unaligned(siprand_u32(state), (u32 *)ptr); + ptr += sizeof(u32); + bytes -= sizeof(u32); + } + + if (bytes > 0) { + u32 rem = siprand_u32(state); + + do { + *ptr++ = (u8)rem; + rem >>= BITS_PER_BYTE; + } while (--bytes > 0); + } + put_cpu_ptr(&net_rand_state); +} +EXPORT_SYMBOL(prandom_bytes); + +/** + * prandom_seed - add entropy to pseudo random number generator + * @entropy: entropy value + * + * Add some additional seed material to the prandom pool. + * The "entropy" is actually our IP address (the only caller is + * the network code), not for unpredictability, but to ensure that + * different machines are initialized differently. + */ +void prandom_seed(u32 entropy) +{ + int i; + + add_device_randomness(&entropy, sizeof(entropy)); + + for_each_possible_cpu(i) { + struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); + unsigned long v0 = state->v0, v1 = state->v1; + unsigned long v2 = state->v2, v3 = state->v3; + + do { + v3 ^= entropy; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= entropy; + } while (unlikely(!v0 || !v1 || !v2 || !v3)); + + WRITE_ONCE(state->v0, v0); + WRITE_ONCE(state->v1, v1); + WRITE_ONCE(state->v2, v2); + WRITE_ONCE(state->v3, v3); + } +} +EXPORT_SYMBOL(prandom_seed); + +/* + * Generate some initially weak seeding values to allow + * the prandom_u32() engine to be started. 
+ */ +static int __init prandom_init_early(void) +{ + int i; + unsigned long v0, v1, v2, v3; + + if (!arch_get_random_long(&v0)) + v0 = jiffies; + if (!arch_get_random_long(&v1)) + v1 = random_get_entropy(); + v2 = v0 ^ PRND_K0; + v3 = v1 ^ PRND_K1; + + for_each_possible_cpu(i) { + struct siprand_state *state; + + v3 ^= i; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= i; + + state = per_cpu_ptr(&net_rand_state, i); + state->v0 = v0; state->v1 = v1; + state->v2 = v2; state->v3 = v3; + } + + return 0; +} +core_initcall(prandom_init_early); + + +/* Stronger reseeding when available, and periodically thereafter. */ +static void prandom_reseed(struct timer_list *unused); + +static DEFINE_TIMER(seed_timer, prandom_reseed); + +static void prandom_reseed(struct timer_list *unused) +{ + unsigned long expires; + int i; + + /* + * Reinitialize each CPU's PRNG with 128 bits of key. + * No locking on the CPUs, but then somewhat random results are, + * well, expected. + */ + for_each_possible_cpu(i) { + struct siprand_state *state; + unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; + unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; +#if BITS_PER_LONG == 32 + int j; + + /* + * On 32-bit machines, hash in two extra words to + * approximate 128-bit key length. Not that the hash + * has that much security, but this prevents a trivial + * 64-bit brute force. + */ + for (j = 0; j < 2; j++) { + unsigned long m = get_random_long(); + + v3 ^= m; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= m; + } +#endif + /* + * Probably impossible in practice, but there is a + * theoretical risk that a race between this reseeding + * and the target CPU writing its state back could + * create the all-zero SipHash fixed point. + * + * To ensure that never happens, ensure the state + * we write contains no zero words. + */ + state = per_cpu_ptr(&net_rand_state, i); + WRITE_ONCE(state->v0, v0 ? v0 : -1ul); + WRITE_ONCE(state->v1, v1 ? v1 : -1ul); + WRITE_ONCE(state->v2, v2 ? v2 : -1ul); + WRITE_ONCE(state->v3, v3 ? v3 : -1ul); + } + + /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ + expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); + mod_timer(&seed_timer, expires); +} + +/* + * The random ready callback can be called from almost any interrupt. + * To avoid worrying about whether it's safe to delay that interrupt + * long enough to seed all CPUs, just schedule an immediate timer event. + */ +static void prandom_timer_start(struct random_ready_callback *unused) +{ + mod_timer(&seed_timer, jiffies); +} + +#ifdef CONFIG_RANDOM32_SELFTEST +/* Principle: True 32-bit random numbers will all have 16 differing bits on + * average. For each 32-bit number, there are 601M numbers differing by 16 + * bits, and 89% of the numbers differ by at least 12 bits. Note that more + * than 16 differing bits also implies a correlation with inverted bits. Thus + * we take 1024 random numbers and compare each of them to the other ones, + * counting the deviation of correlated bits to 16. Constants report 32, + * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the + * u32 total, TEST_SIZE may be as large as 4096 samples. 
+ */ +#define TEST_SIZE 1024 +static int __init prandom32_state_selftest(void) +{ + unsigned int x, y, bits, samples; + u32 xor, flip; + u32 total; + u32 *data; + + data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL); + if (!data) + return 0; + + for (samples = 0; samples < TEST_SIZE; samples++) + data[samples] = prandom_u32(); + + flip = total = 0; + for (x = 0; x < samples; x++) { + for (y = 0; y < samples; y++) { + if (x == y) + continue; + xor = data[x] ^ data[y]; + flip |= xor; + bits = hweight32(xor); + total += (bits - 16) * (bits - 16); + } + } + + /* We'll return the average deviation as 2*sqrt(corr/samples), which + * is also sqrt(4*corr/samples) which provides a better resolution. + */ + bits = int_sqrt(total / (samples * (samples - 1)) * 4); + if (bits > 6) + pr_warn("prandom32: self test failed (at least %u bits" + " correlated, fixed_mask=%#x fixed_value=%#x\n", + bits, ~flip, data[0] & ~flip); + else + pr_info("prandom32: self test passed (less than %u bits" + " correlated)\n", + bits+1); + kfree(data); + return 0; +} +core_initcall(prandom32_state_selftest); +#endif /* CONFIG_RANDOM32_SELFTEST */ + +/* + * Start periodic full reseeding as soon as strong + * random numbers are available. + */ +static int __init prandom_init_late(void) +{ + static struct random_ready_callback random_ready = { + .func = prandom_timer_start + }; + int ret = add_random_ready_callback(&random_ready); + + if (ret == -EALREADY) { + prandom_timer_start(&random_ready); + ret = 0; + } + return ret; +} +late_initcall(prandom_init_late); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5d63a8857f36..a59778946404 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -365,6 +365,37 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) } EXPORT_SYMBOL(sg_alloc_table); +static struct scatterlist *get_next_sg(struct sg_table *table, + struct scatterlist *cur, + unsigned long needed_sges, + gfp_t gfp_mask) +{ + struct scatterlist *new_sg, *next_sg; + unsigned int alloc_size; + + if (cur) { + next_sg = sg_next(cur); + /* Check if last entry should be keeped for chainning */ + if (!sg_is_last(next_sg) || needed_sges == 1) + return next_sg; + } + + alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC); + new_sg = sg_kmalloc(alloc_size, gfp_mask); + if (!new_sg) + return ERR_PTR(-ENOMEM); + sg_init_table(new_sg, alloc_size); + if (cur) { + __sg_chain(next_sg, new_sg); + table->orig_nents += alloc_size - 1; + } else { + table->sgl = new_sg; + table->orig_nents = alloc_size; + table->nents = 0; + } + return new_sg; +} + /** * __sg_alloc_table_from_pages - Allocate and initialize an sg table from * an array of pages @@ -373,30 +404,69 @@ EXPORT_SYMBOL(sg_alloc_table); * @n_pages: Number of pages in the pages array * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) - * @max_segment: Maximum size of a scatterlist node in bytes (page aligned) + * @max_segment: Maximum size of a scatterlist element in bytes + * @prv: Last populated sge in sgt + * @left_pages: Left pages caller have to set after this call * @gfp_mask: GFP allocation mask * - * Description: - * Allocate and initialize an sg table from a list of pages. Contiguous - * ranges of the pages are squashed into a single scatterlist node up to the - * maximum size specified in @max_segment. An user may provide an offset at a - * start and a size of valid data in a buffer specified by the page array. 
- * The returned sg table is released by sg_free_table. + * Description: + * If @prv is NULL, allocate and initialize an sg table from a list of pages, + * else reuse the scatterlist passed in at @prv. + * Contiguous ranges of the pages are squashed into a single scatterlist + * entry up to the maximum size specified in @max_segment. A user may + * provide an offset at a start and a size of valid data in a buffer + * specified by the page array. * * Returns: - * 0 on success, negative error on failure + * Last SGE in sgt on success, PTR_ERR on otherwise. + * The allocation in @sgt must be released by sg_free_table. + * + * Notes: + * If this function returns non-0 (eg failure), the caller must call + * sg_free_table() to cleanup any leftover allocations. */ -int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - gfp_t gfp_mask) +struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + struct scatterlist *prv, unsigned int left_pages, + gfp_t gfp_mask) { - unsigned int chunks, cur_page, seg_len, i; - int ret; - struct scatterlist *s; + unsigned int chunks, cur_page, seg_len, i, prv_len = 0; + unsigned int added_nents = 0; + struct scatterlist *s = prv; - if (WARN_ON(!max_segment || offset_in_page(max_segment))) - return -EINVAL; + /* + * The algorithm below requires max_segment to be aligned to PAGE_SIZE + * otherwise it can overshoot. + */ + max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE); + if (WARN_ON(max_segment < PAGE_SIZE)) + return ERR_PTR(-EINVAL); + + if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv) + return ERR_PTR(-EOPNOTSUPP); + + if (prv) { + unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE + + prv->offset + prv->length) / + PAGE_SIZE; + + if (WARN_ON(offset)) + return ERR_PTR(-EINVAL); + + /* Merge contiguous pages into the last SG */ + prv_len = prv->length; + while (n_pages && page_to_pfn(pages[0]) == paddr) { + if (prv->length + PAGE_SIZE > max_segment) + break; + prv->length += PAGE_SIZE; + paddr++; + pages++; + n_pages--; + } + if (!n_pages) + goto out; + } /* compute number of contiguous chunks */ chunks = 1; @@ -410,13 +480,9 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, } } - ret = sg_alloc_table(sgt, chunks, gfp_mask); - if (unlikely(ret)) - return ret; - /* merging chunks and putting them into the scatterlist */ cur_page = 0; - for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { + for (i = 0; i < chunks; i++) { unsigned int j, chunk_size; /* look for the end of the current chunk */ @@ -429,15 +495,30 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, break; } + /* Pass how many chunks might be left */ + s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask); + if (IS_ERR(s)) { + /* + * Adjust entry length to be as before function was + * called. 
+ */ + if (prv) + prv->length = prv_len; + return s; + } chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; sg_set_page(s, pages[cur_page], min_t(unsigned long, size, chunk_size), offset); + added_nents++; size -= chunk_size; offset = 0; cur_page = j; } - - return 0; + sgt->nents += added_nents; +out: + if (!left_pages) + sg_mark_end(s); + return s; } EXPORT_SYMBOL(__sg_alloc_table_from_pages); @@ -465,8 +546,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask) { - return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, - SCATTERLIST_MAX_SEGMENT, gfp_mask); + return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, + offset, size, UINT_MAX, NULL, 0, gfp_mask)); } EXPORT_SYMBOL(sg_alloc_table_from_pages); @@ -504,7 +585,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length, nalloc++; } sgl = kmalloc_array(nalloc, sizeof(struct scatterlist), - (gfp & ~GFP_DMA) | __GFP_ZERO); + gfp & ~GFP_DMA); if (!sgl) return NULL; @@ -514,7 +595,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length, elem_len = min_t(u64, length, PAGE_SIZE << order); page = alloc_pages(gfp, order); if (!page) { - sgl_free(sgl); + sgl_free_order(sgl, order); return NULL; } @@ -852,7 +933,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, sg_miter_start(&miter, sgl, nents, sg_flags); if (!sg_miter_skip(&miter, skip)) - return false; + return 0; while ((offset < buflen) && sg_miter_next(&miter)) { unsigned int len; diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 34696a348864..122d8d0e253c 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/compiler.h> #include <linux/export.h> +#include <linux/fault-inject-usercopy.h> #include <linux/kasan-checks.h> #include <linux/thread_info.h> #include <linux/uaccess.h> @@ -34,17 +35,32 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, goto byte_at_a_time; while (max >= sizeof(unsigned long)) { - unsigned long c, data; + unsigned long c, data, mask; /* Fall back to byte-at-a-time if we get a page fault */ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); - *(unsigned long *)(dst+res) = c; + /* + * Note that we mask out the bytes following the NUL. This is + * important to do because string oblivious code may read past + * the NUL. For those routines, we don't want to give them + * potentially random bytes after the NUL in `src`. + * + * One example of such code is BPF map keys. BPF treats map keys + * as an opaque set of bytes. Without the post-NUL mask, any BPF + * maps keyed by strings returned from strncpy_from_user() may + * have multiple entries for semantically identical strings. 
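The masking above is done with the kernel's word-at-a-time helpers (has_zero(), create_zero_mask(), zero_bytemask()). A simplified stand-alone illustration of the same idea, assuming a little-endian layout and deliberately not using the kernel machinery:

#include <stdint.h>
#include <stdio.h>

/* Clear every byte after the first NUL in a little-endian word. */
static uint64_t mask_past_nul(uint64_t c)
{
	/* Classic zero-byte detector: 0x80 set at each zero byte. */
	uint64_t has = (c - 0x0101010101010101ULL) & ~c &
		       0x8080808080808080ULL;

	if (!has)
		return c;	/* no NUL in this word */
	/* Keep only the bits below the first NUL's marker bit. */
	return c & ((has & -has) - 1);
}

int main(void)
{
	/* Bytes "ab\0\0" followed by junk, stored little-endian. */
	uint64_t w = 0xdeadbeef00006261ULL;

	printf("%#llx\n", (unsigned long long)mask_past_nul(w));
	/* Prints 0x6261: the junk after the NUL is gone. */
	return 0;
}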
+ */ if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); + mask = zero_bytemask(data); + *(unsigned long *)(dst+res) = c & mask; return res + find_zero(data); } + + *(unsigned long *)(dst+res) = c; + res += sizeof(unsigned long); max -= sizeof(unsigned long); } @@ -99,6 +115,8 @@ long strncpy_from_user(char *dst, const char __user *src, long count) unsigned long max_addr, src_addr; might_fault(); + if (should_fail_usercopy()) + return -EFAULT; if (unlikely(count <= 0)) return 0; diff --git a/lib/syscall.c b/lib/syscall.c index fb328e7ccb08..8533d2fea2d7 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -44,7 +44,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info * .data.instruction_pointer - filled with user PC * * If @target is blocked in a system call, returns zero with @info.data.nr - * set to the the call's number and @info.data.args filled in with its + * set to the call's number and @info.data.args filled in with its * arguments. Registers not used for system call arguments may not be available * and it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index df903c53952b..4425a1dd4ef1 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -354,50 +354,37 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = { }; -static void __init __test_bitmap_parselist(int is_user) +static void __init test_bitmap_parselist(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); - char *mode = is_user ? "_user" : ""; for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) { #define ptest parselist_tests[i] - if (is_user) { - mm_segment_t orig_fs = get_fs(); - size_t len = strlen(ptest.in); - - set_fs(KERNEL_DS); - time = ktime_get(); - err = bitmap_parselist_user((__force const char __user *)ptest.in, len, - bmap, ptest.nbits); - time = ktime_get() - time; - set_fs(orig_fs); - } else { - time = ktime_get(); - err = bitmap_parselist(ptest.in, bmap, ptest.nbits); - time = ktime_get() - time; - } + time = ktime_get(); + err = bitmap_parselist(ptest.in, bmap, ptest.nbits); + time = ktime_get() - time; if (err != ptest.errno) { - pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n", - mode, i, ptest.in, err, ptest.errno); + pr_err("parselist: %d: input is %s, errno is %d, expected %d\n", + i, ptest.in, err, ptest.errno); continue; } if (!err && ptest.expected && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) { - pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n", - mode, i, ptest.in, bmap[0], + pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n", + i, ptest.in, bmap[0], *ptest.expected); continue; } if (ptest.flags & PARSE_TIME) - pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n", - mode, i, ptest.in, time); + pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", + i, ptest.in, time); #undef ptest } @@ -443,75 +430,41 @@ static const struct test_bitmap_parselist parse_tests[] __initconst = { #undef step }; -static void __init __test_bitmap_parse(int is_user) +static void __init test_bitmap_parse(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); - char *mode = is_user ? "_user" : ""; for (i = 0; i < ARRAY_SIZE(parse_tests); i++) { struct test_bitmap_parselist test = parse_tests[i]; + size_t len = test.flags & NO_LEN ? 
UINT_MAX : strlen(test.in); - if (is_user) { - size_t len = strlen(test.in); - mm_segment_t orig_fs = get_fs(); - - set_fs(KERNEL_DS); - time = ktime_get(); - err = bitmap_parse_user((__force const char __user *)test.in, len, - bmap, test.nbits); - time = ktime_get() - time; - set_fs(orig_fs); - } else { - size_t len = test.flags & NO_LEN ? - UINT_MAX : strlen(test.in); - time = ktime_get(); - err = bitmap_parse(test.in, len, bmap, test.nbits); - time = ktime_get() - time; - } + time = ktime_get(); + err = bitmap_parse(test.in, len, bmap, test.nbits); + time = ktime_get() - time; if (err != test.errno) { - pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n", - mode, i, test.in, err, test.errno); + pr_err("parse: %d: input is %s, errno is %d, expected %d\n", + i, test.in, err, test.errno); continue; } if (!err && test.expected && !__bitmap_equal(bmap, test.expected, test.nbits)) { - pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n", - mode, i, test.in, bmap[0], + pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n", + i, test.in, bmap[0], *test.expected); continue; } if (test.flags & PARSE_TIME) - pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n", - mode, i, test.in, time); + pr_err("parse: %d: input is '%s' OK, Time: %llu\n", + i, test.in, time); } } -static void __init test_bitmap_parselist(void) -{ - __test_bitmap_parselist(0); -} - -static void __init test_bitmap_parselist_user(void) -{ - __test_bitmap_parselist(1); -} - -static void __init test_bitmap_parse(void) -{ - __test_bitmap_parse(0); -} - -static void __init test_bitmap_parse_user(void) -{ - __test_bitmap_parse(1); -} - #define EXP1_IN_BITS (sizeof(exp1) * 8) static void __init test_bitmap_arr32(void) @@ -675,9 +628,7 @@ static void __init selftest(void) test_replace(); test_bitmap_arr32(); test_bitmap_parse(); - test_bitmap_parse_user(); test_bitmap_parselist(); - test_bitmap_parselist_user(); test_mem_optimisations(); test_for_each_set_clump8(); test_bitmap_cut(); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index e151a7f10519..80a78877bd93 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -461,7 +461,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, devmem = kzalloc(sizeof(*devmem), GFP_KERNEL); if (!devmem) - return -ENOMEM; + return false; res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, "hmm_dmirror"); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 63c26171a791..662f862702fc 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test) u64 words[2]; } *ptr1, *ptr2; + /* This test is specifically crafted for the generic mode. 
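A side note on the test_hmm fix above: in a function declared bool, return -ENOMEM silently converts to true, so the caller would see success on the allocation-failure path. A contrived userspace sketch of the trap (alloc_thing() is a made-up name):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool alloc_thing(bool simulate_failure)
{
	if (simulate_failure)
		return -ENOMEM;	/* nonzero, so this converts to true! */
	return true;
}

int main(void)
{
	if (alloc_thing(true))
		puts("caller thinks the allocation succeeded");
	return 0;
}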
*/ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required\n"); + return; + } + ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); @@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test) kfree(ptr2); } +static void kmalloc_uaf_16(struct kunit *test) +{ + struct { + u64 words[2]; + } *ptr1, *ptr2; + + ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); + + ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); + kfree(ptr2); + + KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); + kfree(ptr1); +} + static void kmalloc_oob_memset_2(struct kunit *test) { char *ptr; @@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test) volatile int i = 3; char *p = &global_array[ARRAY_SIZE(global_array) + i]; + /* Only generic mode instruments globals. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); } @@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test) char alloca_array[i]; char *p = alloca_array - 1; + /* Only generic mode instruments dynamic allocas. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + if (!IS_ENABLED(CONFIG_KASAN_STACK)) { kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); return; @@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test) char alloca_array[i]; char *p = alloca_array + i; + /* Only generic mode instruments dynamic allocas. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + if (!IS_ENABLED(CONFIG_KASAN_STACK)) { kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); return; @@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test) return; } + if (OOB_TAG_OFF) + size = round_up(size, OOB_TAG_OFF); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test) return; } + if (OOB_TAG_OFF) + size = round_up(size, OOB_TAG_OFF); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); memset(arr, 0, sizeof(arr)); @@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1)); } -static void kasan_bitops(struct kunit *test) +static void kasan_bitops_modify(struct kunit *test, int nr, void *addr) +{ + KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr)); +} + +static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) +{ + KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, 
__test_and_change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); + +#if defined(clear_bit_unlock_is_negative_byte) + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = + clear_bit_unlock_is_negative_byte(nr, addr)); +#endif +} + +static void kasan_bitops_generic(struct kunit *test) { + long *bits; + + /* This test is specifically crafted for the generic mode. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required\n"); + return; + } + /* * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes; * this way we do not actually corrupt other memory. */ - long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); + bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); /* @@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test) * below accesses are still out-of-bounds, since bitops are defined to * operate on the whole long the bit is in. */ - KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits)); + kasan_bitops_modify(test, BITS_PER_LONG, bits); /* * Below calls try to access bit beyond allocated memory. */ - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits)); + kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits); - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + kfree(bits); +} - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); +static void kasan_bitops_tags(struct kunit *test) +{ + long *bits; - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* This test is specifically crafted for the tag-based mode. */ + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n"); + return; + } - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* Allocation size will be rounded up to the granule size, which is 16. */ + bits = kzalloc(sizeof(*bits), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); - KUNIT_EXPECT_KASAN_FAIL(test, - kasan_int_result = - test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* Do the accesses past the 16 allocated bytes.
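For the arithmetic in these bitops tests: generic bitops first locate the long containing the bit, so bit number BITS_PER_LONG of a single-long buffer touches the word after it. A sketch paraphrasing the generic non-atomic set_bit() addressing (an illustration, not copied from the kernel implementation):

/* set_bit(nr, addr) boils down to something like this: */
static inline void sketch_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = addr + nr / BITS_PER_LONG;

	/* nr == BITS_PER_LONG gives p == addr + 1: out of bounds for a
	 * one-long allocation, which is what the tests above provoke. */
	*p |= mask;
}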
*/ + kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]); + kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]); -#if defined(clear_bit_unlock_is_negative_byte) - KUNIT_EXPECT_KASAN_FAIL(test, - kasan_int_result = clear_bit_unlock_is_negative_byte( - BITS_PER_LONG + BITS_PER_BYTE, bits)); -#endif kfree(bits); } @@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_oob_krealloc_more), KUNIT_CASE(kmalloc_oob_krealloc_less), KUNIT_CASE(kmalloc_oob_16), + KUNIT_CASE(kmalloc_uaf_16), KUNIT_CASE(kmalloc_oob_in_memset), KUNIT_CASE(kmalloc_oob_memset_2), KUNIT_CASE(kmalloc_oob_memset_4), @@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kasan_memchr), KUNIT_CASE(kasan_memcmp), KUNIT_CASE(kasan_strings), - KUNIT_CASE(kasan_bitops), + KUNIT_CASE(kasan_bitops_generic), + KUNIT_CASE(kasan_bitops_tags), KUNIT_CASE(kmalloc_double_kzfree), KUNIT_CASE(vmalloc_oob), {} diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 98bc92a91662..3750323973f4 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -16,7 +16,7 @@ */ /* - * This module provides an interface to the the proc sysctl interfaces. This + * This module provides an interface to the proc sysctl interfaces. This * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the * system unless explicitly requested by name. You can also build this driver * into your kernel. diff --git a/lib/test_xarray.c b/lib/test_xarray.c index d4f97925dbd8..8294f43f4981 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa) xa_destroy(xa); } +static noinline void check_xa_mark_3(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + XA_STATE(xas, xa, 0x41); + void *entry; + int count = 0; + + xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL); + xa_set_mark(xa, 0x41, XA_MARK_0); + + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) { + count++; + XA_BUG_ON(xa, entry != xa_mk_index(0x40)); + } + XA_BUG_ON(xa, count != 1); + rcu_read_unlock(); + xa_destroy(xa); +#endif +} + static noinline void check_xa_mark(struct xarray *xa) { unsigned long index; @@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa) check_xa_mark_1(xa, index); check_xa_mark_2(xa); + check_xa_mark_3(xa); } static noinline void check_xa_shrink(struct xarray *xa) @@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); + XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); + XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); xa_erase_index(xa, 12345678); xa_erase_index(xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); @@ -1503,6 +1528,49 @@ static noinline void check_store_range(struct xarray *xa) } } +#ifdef CONFIG_XARRAY_MULTI +static void check_split_1(struct xarray *xa, unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, index); + void *entry; + unsigned int i = 0; + + xa_store_order(xa, index, order, xa, GFP_KERNEL); + + xas_split_alloc(&xas, xa, order, GFP_KERNEL); + xas_lock(&xas); + xas_split(&xas, xa, order); + xas_unlock(&xas); + + xa_for_each(xa, index, entry) { + XA_BUG_ON(xa, entry != xa); + i++; + } + XA_BUG_ON(xa, i != 1 << order); + + 
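The new check_cmpxchg() assertions above pin down xa_insert()'s contract: it stores only into an empty slot and returns -EBUSY otherwise. A small sketch of the resulting reserve-once idiom; reserve_slot() is a hypothetical helper:

static int reserve_slot(struct xarray *xa, unsigned long index, void *item)
{
	/* Fails with -EBUSY instead of overwriting an existing entry. */
	int err = xa_insert(xa, index, item, GFP_KERNEL);

	if (err == -EBUSY)
		pr_debug("index %lu already taken\n", index);
	return err;
}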
xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + + xa_destroy(xa); +} + +static noinline void check_split(struct xarray *xa) +{ + unsigned int order; + + XA_BUG_ON(xa, !xa_empty(xa)); + + for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { + check_split_1(xa, 0, order); + check_split_1(xa, 1UL << order, order); + check_split_1(xa, 3UL << order, order); + } +} +#else +static void check_split(struct xarray *xa) { } +#endif + static void check_align_1(struct xarray *xa, char *name) { int i; @@ -1575,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa) xa_lock(xa); while ((node = list_first_entry_or_null(&shadow_nodes, struct xa_node, private_list))) { - XA_STATE(xas, node->array, 0); XA_BUG_ON(xa, node->array != xa); list_del_init(&node->private_list); - xas.xa_node = xa_parent_locked(node->array, node); - xas.xa_offset = node->offset; - xas.xa_shift = node->shift + XA_CHUNK_SHIFT; - xas_set_update(&xas, test_update_node); - xas_store(&xas, NULL); + xa_delete_node(node, test_update_node); } xa_unlock(xa); } @@ -1649,6 +1712,26 @@ static noinline void check_account(struct xarray *xa) #endif } +static noinline void check_get_order(struct xarray *xa) +{ + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned int order; + unsigned long i, j; + + for (i = 0; i < 3; i++) + XA_BUG_ON(xa, xa_get_order(xa, i) != 0); + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xa_store_order(xa, i << order, order, + xa_mk_index(i << order), GFP_KERNEL); + for (j = i << order; j < (i + 1) << order; j++) + XA_BUG_ON(xa, xa_get_order(xa, j) != order); + xa_erase(xa, i << order); + } + } +} + static noinline void check_destroy(struct xarray *xa) { unsigned long index; @@ -1697,6 +1780,7 @@ static int xarray_checks(void) check_reserve(&array); check_reserve(&xa0); check_multi_store(&array); + check_get_order(&array); check_xa_alloc(); check_find(&array); check_find_entry(&array); @@ -1708,6 +1792,7 @@ static int xarray_checks(void) check_store_range(&array); check_store_iter(&array); check_align(&xa0); + check_split(&array); check_workingset(&array, 0); check_workingset(&array, 64); diff --git a/lib/usercopy.c b/lib/usercopy.c index b26509f112f9..7413dd300516 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bitops.h> +#include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> #include <linux/uaccess.h> @@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n { unsigned long res = n; might_fault(); - if (likely(access_ok(from, n))) { + if (!should_fail_usercopy() && likely(access_ok(from, n))) { instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } @@ -25,6 +26,8 @@ EXPORT_SYMBOL(_copy_from_user); unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + if (should_fail_usercopy()) + return n; if (likely(access_ok(to, n))) { instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); diff --git a/lib/xarray.c b/lib/xarray.c index e9e641d3c0c3..5fa51614802a 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node) */ static void xas_destroy(struct xa_state *xas) { - struct xa_node *node = xas->xa_alloc; + struct xa_node *next, *node = xas->xa_alloc; - if (!node) - return; - XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); - 
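On the usercopy fault-injection hooks added above: callers cannot tell an injected failure from a real fault, since both surface as uncopied bytes. A sketch of the unchanged caller-side contract (read_u32_from_user() is an illustrative name):

static long read_u32_from_user(const void __user *uptr, u32 *val)
{
	/* A nonzero return means bytes were left uncopied: injected or real. */
	if (copy_from_user(val, uptr, sizeof(*val)))
		return -EFAULT;
	return 0;
}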
kmem_cache_free(radix_tree_node_cachep, node); - xas->xa_alloc = NULL; + while (node) { + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + next = rcu_dereference_raw(node->parent); + radix_tree_node_rcu_free(&node->rcu_head); + xas->xa_alloc = node = next; + } } /** @@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp) xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); if (!xas->xa_alloc) return false; + xas->xa_alloc->parent = NULL; XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); xas->xa_node = XAS_RESTART; return true; @@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) } if (!xas->xa_alloc) return false; + xas->xa_alloc->parent = NULL; XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); xas->xa_node = XAS_RESTART; return true; @@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas) /* * Use this to calculate the maximum index that will need to be created * in order to add the entry described by @xas. Because we cannot store a - * multiple-index entry at index 0, the calculation is a little more complex + * multi-index entry at index 0, the calculation is a little more complex * than you might expect. */ static unsigned long xas_max(struct xa_state *xas) @@ -703,7 +706,7 @@ void xas_create_range(struct xa_state *xas) unsigned char shift = xas->xa_shift; unsigned char sibs = xas->xa_sibs; - xas->xa_index |= ((sibs + 1) << shift) - 1; + xas->xa_index |= ((sibs + 1UL) << shift) - 1; if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) xas->xa_offset |= sibs; xas->xa_shift = 0; @@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas) } EXPORT_SYMBOL_GPL(xas_init_marks); +#ifdef CONFIG_XARRAY_MULTI +static unsigned int node_get_marks(struct xa_node *node, unsigned int offset) +{ + unsigned int marks = 0; + xa_mark_t mark = XA_MARK_0; + + for (;;) { + if (node_get_mark(node, offset, mark)) + marks |= 1 << (__force unsigned int)mark; + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } + + return marks; +} + +static void node_set_marks(struct xa_node *node, unsigned int offset, + struct xa_node *child, unsigned int marks) +{ + xa_mark_t mark = XA_MARK_0; + + for (;;) { + if (marks & (1 << (__force unsigned int)mark)) { + node_set_mark(node, offset, mark); + if (child) + node_mark_all(child, mark); + } + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } +} + +/** + * xas_split_alloc() - Allocate memory for splitting an entry. + * @xas: XArray operation state. + * @entry: New entry which will be stored in the array. + * @order: New entry order. + * @gfp: Memory allocation flags. + * + * This function should be called before calling xas_split(). + * If necessary, it will allocate new nodes (and fill them with @entry) + * to prepare for the upcoming split of an entry of @order size into + * entries of the order stored in the @xas. + * + * Context: May sleep if @gfp flags permit. 
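Per the Context lines here and on xas_split() below, the intended pattern is two-phase: perform the possibly-sleeping allocation outside the lock, then split under xa_lock, as check_split_1() in the test suite above does. A hypothetical wrapper sketching that sequence:

static int split_entry(struct xarray *xa, unsigned long index,
		       void *entry, unsigned int order)
{
	XA_STATE(xas, xa, index);

	/* May sleep; stocks the xa_state with the nodes the split needs. */
	xas_split_alloc(&xas, entry, order, GFP_KERNEL);
	if (xas_error(&xas))
		return xas_error(&xas);

	xas_lock(&xas);
	xas_split(&xas, entry, order);
	xas_unlock(&xas);
	return 0;
}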
+ */ +void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, + gfp_t gfp) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int mask = xas->xa_sibs; + + /* XXX: no support for splitting really large entries yet */ + if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order)) + goto nomem; + if (xas->xa_shift + XA_CHUNK_SHIFT > order) + return; + + do { + unsigned int i; + void *sibling; + struct xa_node *node; + + node = kmem_cache_alloc(radix_tree_node_cachep, gfp); + if (!node) + goto nomem; + node->array = xas->xa; + for (i = 0; i < XA_CHUNK_SIZE; i++) { + if ((i & mask) == 0) { + RCU_INIT_POINTER(node->slots[i], entry); + sibling = xa_mk_sibling(0); + } else { + RCU_INIT_POINTER(node->slots[i], sibling); + } + } + RCU_INIT_POINTER(node->parent, xas->xa_alloc); + xas->xa_alloc = node; + } while (sibs-- > 0); + + return; +nomem: + xas_destroy(xas); + xas_set_err(xas, -ENOMEM); +} +EXPORT_SYMBOL_GPL(xas_split_alloc); + +/** + * xas_split() - Split a multi-index entry into smaller entries. + * @xas: XArray operation state. + * @entry: New entry to store in the array. + * @order: New entry order. + * + * The value in the entry is copied to all the replacement entries. + * + * Context: Any context. The caller should hold the xa_lock. + */ +void xas_split(struct xa_state *xas, void *entry, unsigned int order) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int offset, marks; + struct xa_node *node; + void *curr = xas_load(xas); + int values = 0; + + node = xas->xa_node; + if (xas_top(node)) + return; + + marks = node_get_marks(node, xas->xa_offset); + + offset = xas->xa_offset + sibs; + do { + if (xas->xa_shift < node->shift) { + struct xa_node *child = xas->xa_alloc; + + xas->xa_alloc = rcu_dereference_raw(child->parent); + child->shift = node->shift - XA_CHUNK_SHIFT; + child->offset = offset; + child->count = XA_CHUNK_SIZE; + child->nr_values = xa_is_value(entry) ? + XA_CHUNK_SIZE : 0; + RCU_INIT_POINTER(child->parent, node); + node_set_marks(node, offset, child, marks); + rcu_assign_pointer(node->slots[offset], + xa_mk_node(child)); + if (xa_is_value(curr)) + values--; + } else { + unsigned int canon = offset - xas->xa_sibs; + + node_set_marks(node, canon, NULL, marks); + rcu_assign_pointer(node->slots[canon], entry); + while (offset > canon) + rcu_assign_pointer(node->slots[offset--], + xa_mk_sibling(canon)); + values += (xa_is_value(entry) - xa_is_value(curr)) * + (xas->xa_sibs + 1); + } + } while (offset-- > xas->xa_offset); + + node->nr_values += values; +} +EXPORT_SYMBOL_GPL(xas_split); +#endif + /** * xas_pause() - Pause a walk to drop a lock. * @xas: XArray operation state. @@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store); * @gfp: Memory allocation flags. * * After this function returns, loads from this index will return @entry. - * Storing into an existing multislot entry updates the entry of every index. + * Storing into an existing multi-index entry updates the entry of every index. * The marks associated with @index are unaffected unless @entry is %NULL. * * Context: Any context. Takes and releases the xa_lock. @@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first, * * After this function returns, loads from any index between @first and @last, * inclusive will return @entry. - * Storing into an existing multislot entry updates the entry of every index. + * Storing into an existing multi-index entry updates the entry of every index. 
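A short sketch of the multi-index behaviour described here, storing one entry over indices 64-127 and loading it back from the middle; cover_range() is an illustrative name:

static int cover_range(struct xarray *xa, void *item)
{
	int err = xa_err(xa_store_range(xa, 64, 127, item, GFP_KERNEL));

	if (err)
		return err;
	/* Every index in the range now loads the same entry. */
	WARN_ON(xa_load(xa, 100) != item);
	return 0;
}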
* The marks associated with @index are unaffected unless @entry is %NULL. * * Context: Process context. Takes and releases the xa_lock. May sleep * @@ -1592,6 +1742,46 @@ unlock: return xas_result(&xas, NULL); } EXPORT_SYMBOL(xa_store_range); + +/** + * xa_get_order() - Get the order of an entry. + * @xa: XArray. + * @index: Index of the entry. + * + * Return: A number between 0 and 63 indicating the order of the entry. + */ +int xa_get_order(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + void *entry; + int order = 0; + + rcu_read_lock(); + entry = xas_load(&xas); + + if (!entry) + goto unlock; + + if (!xas.xa_node) + goto unlock; + + for (;;) { + unsigned int slot = xas.xa_offset + (1 << order); + + if (slot >= XA_CHUNK_SIZE) + break; + if (!xa_is_sibling(xas.xa_node->slots[slot])) + break; + order++; + } + + order += xas.xa_node->shift; +unlock: + rcu_read_unlock(); + + return order; +} +EXPORT_SYMBOL(xa_get_order); #endif /* CONFIG_XARRAY_MULTI */ /** @@ -1974,6 +2164,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, EXPORT_SYMBOL(xa_extract); /** + * xa_delete_node() - Private interface for workingset code. + * @node: Node to be removed from the tree. + * @update: Function to call to update ancestor nodes. + * + * Context: xa_lock must be held on entry and will not be released. + */ +void xa_delete_node(struct xa_node *node, xa_update_node_t update) +{ + struct xa_state xas = { + .xa = node->array, + .xa_index = (unsigned long)node->offset << + (node->shift + XA_CHUNK_SHIFT), + .xa_shift = node->shift + XA_CHUNK_SHIFT, + .xa_offset = node->offset, + .xa_node = xa_parent_locked(node->array, node), + .xa_update = update, + }; + + xas_store(&xas, NULL); +} +EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */ + +/** + * xa_destroy() - Free all internal data structures. + * @xa: XArray. *
