Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 10
-rw-r--r--  lib/Kconfig.debug | 225
-rw-r--r--  lib/Kconfig.kasan | 20
-rw-r--r--  lib/Kconfig.kcsan | 11
-rw-r--r--  lib/Kconfig.kfence | 12
-rw-r--r--  lib/Kconfig.ubsan | 12
-rw-r--r--  lib/Makefile | 12
-rw-r--r--  lib/bitmap.c | 24
-rw-r--r--  lib/crc32.c | 14
-rw-r--r--  lib/crc32test.c | 2
-rw-r--r--  lib/crc64-rocksoft.c | 126
-rw-r--r--  lib/crc64.c | 28
-rw-r--r--  lib/crypto/Kconfig | 3
-rw-r--r--  lib/crypto/Makefile | 3
-rw-r--r--  lib/crypto/blake2s.c | 4
-rw-r--r--  lib/crypto/sm3.c | 246
-rw-r--r--  lib/gen_crc64table.c | 51
-rw-r--r--  lib/hexdump.c | 41
-rw-r--r--  lib/iov_iter.c | 2
-rw-r--r--  lib/kobject.c | 32
-rw-r--r--  lib/kunit/assert.c | 80
-rw-r--r--  lib/kunit/kunit-example-test.c | 42
-rw-r--r--  lib/kunit/test.c | 35
-rw-r--r--  lib/kunit/try-catch.c | 3
-rw-r--r--  lib/list-test.c | 61
-rw-r--r--  lib/logic_iomem.c | 8
-rw-r--r--  lib/lz4/lz4_decompress.c | 8
-rw-r--r--  lib/mpi/mpi-bit.c | 1
-rw-r--r--  lib/overflow_kunit.c (renamed from lib/test_overflow.c) | 518
-rw-r--r--  lib/raid6/test/Makefile | 4
-rw-r--r--  lib/raid6/test/test.c | 1
-rw-r--r--  lib/raid6/vpermxor.uc | 2
-rw-r--r--  lib/random32.c | 14
-rw-r--r--  lib/sbitmap.c | 42
-rw-r--r--  lib/sort.c | 40
-rw-r--r--  lib/stackinit_kunit.c (renamed from lib/test_stackinit.c) | 269
-rw-r--r--  lib/string_helpers.c | 6
-rw-r--r--  lib/strncpy_from_user.c | 2
-rw-r--r--  lib/strnlen_user.c | 2
-rw-r--r--  lib/test_bpf.c | 315
-rw-r--r--  lib/test_fortify/read_overflow2_field-memcpy.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2_field-memmove.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memcpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memmove.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memset.c | 5
-rw-r--r--  lib/test_fprobe.c | 174
-rw-r--r--  lib/test_hmm.c | 4
-rw-r--r--  lib/test_kasan.c | 244
-rw-r--r--  lib/test_kmod.c | 1
-rw-r--r--  lib/test_lockup.c | 11
-rw-r--r--  lib/test_xarray.c | 22
-rw-r--r--  lib/ubsan.c | 10
-rw-r--r--  lib/vsprintf.c | 67
-rw-r--r--  lib/xarray.c | 16
54 files changed, 2138 insertions, 767 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c80fde816a7e..087e06b4cdfd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -45,7 +45,6 @@ config BITREVERSE
config HAVE_ARCH_BITREVERSE
bool
default n
- depends on BITREVERSE
help
This option enables the use of hardware bit-reversal instructions on
architectures which support such operations.
@@ -146,6 +145,15 @@ config CRC_T10DIF
kernel tree needs to calculate CRC checks for use with the
SCSI data integrity subsystem.
+config CRC64_ROCKSOFT
+ tristate "CRC calculation for the Rocksoft model CRC64"
+ select CRC64
+ select CRYPTO
+ select CRYPTO_CRC64_ROCKSOFT
+ help
+ This option provides a CRC64 API to a registered crypto driver.
+ This is used with the block layer's data integrity subsystem.
+
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1555da672275..075cd25363ac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -208,20 +208,87 @@ config DEBUG_BUGVERBOSE
endmenu # "printk and dmesg options"
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+config DEBUG_MISC
+ bool "Miscellaneous debug code"
+ default DEBUG_KERNEL
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you need to enable miscellaneous debug code that should
+ be under a more specific debug option but isn't.
+
menu "Compile-time checks and compiler options"
config DEBUG_INFO
- bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL && !COMPILE_TEST
+ bool
help
- If you say Y here the resulting kernel image will include
- debugging info resulting in a larger kernel image.
+ A kernel debug info option other than "None" has been selected
+ in the "Debug information" choice below, indicating that debug
+ information will be generated for build targets.
+
+choice
+ prompt "Debug information"
+ depends on DEBUG_KERNEL
+ help
+ Selecting something other than "None" builds the kernel with
+ debugging info, resulting in a larger kernel image.
This adds debug symbols to the kernel and modules (gcc -g), and
is needed if you intend to use kernel crashdump or binary object
tools like crash, kgdb, LKCD, gdb, etc on the kernel.
- Say Y here only if you plan to debug the kernel.
- If unsure, say N.
+ Choose which version of DWARF debug info to emit. If unsure,
+ select "Toolchain default".
+
+config DEBUG_INFO_NONE
+ bool "Disable debug information"
+ help
+ Do not build the kernel with debugging information, which will
+ result in a faster and smaller build.
+
+config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ bool "Rely on the toolchain's implicit default DWARF version"
+ select DEBUG_INFO
+ help
+ The implicit default version of DWARF debug info produced by a
+ toolchain changes over time.
+
+ This can break consumers of the debug info that haven't upgraded to
+ support newer revisions, and prevent testing newer versions, but
+ those should be less common scenarios.
+
+config DEBUG_INFO_DWARF4
+ bool "Generate DWARF Version 4 debuginfo"
+ select DEBUG_INFO
+ help
+ Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+ config select this.
+
+config DEBUG_INFO_DWARF5
+ bool "Generate DWARF Version 5 debuginfo"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ help
+ Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+ 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
+ draft features until 7.0), and gdb 8.0+.
+
+ Changes to the structure of debug info in Version 5 allow for around
+ 15-18% savings in resulting image and debug info section sizes as
+ compared to DWARF Version 4. DWARF Version 5 standardizes previous
+ extensions such as accelerators for symbol indexing and the format
+ for fission (.dwo/.dwp) files. Users may not want to select this
+ config if they rely on tooling that has not yet been updated to
+ support DWARF Version 5.
+
+endchoice # "Debug information"
if DEBUG_INFO
@@ -267,56 +334,12 @@ config DEBUG_INFO_SPLIT
to know about the .dwo files and include them.
Incompatible with older versions of ccache.
-choice
- prompt "DWARF version"
- help
- Which version of DWARF debug info to emit.
-
-config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
- bool "Rely on the toolchain's implicit default DWARF version"
- help
- The implicit default version of DWARF debug info produced by a
- toolchain changes over time.
-
- This can break consumers of the debug info that haven't upgraded to
- support newer revisions, and prevent testing newer versions, but
- those should be less common scenarios.
-
- If unsure, say Y.
-
-config DEBUG_INFO_DWARF4
- bool "Generate DWARF Version 4 debuginfo"
- help
- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
-
- If you have consumers of DWARF debug info that are not ready for
- newer revisions of DWARF, you may wish to choose this or have your
- config select this.
-
-config DEBUG_INFO_DWARF5
- bool "Generate DWARF Version 5 debuginfo"
- depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
- depends on !DEBUG_INFO_BTF || PAHOLE_VERSION >= 121
- help
- Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
- 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
- draft features until 7.0), and gdb 8.0+.
-
- Changes to the structure of debug info in Version 5 allow for around
- 15-18% savings in resulting image and debug info section sizes as
- compared to DWARF Version 4. DWARF Version 5 standardizes previous
- extensions such as accelerators for symbol indexing and the format
- for fission (.dwo/.dwp) files. Users may not want to select this
- config if they rely on tooling that has not yet been updated to
- support DWARF Version 5.
-
-endchoice # "DWARF version"
-
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
+ depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@@ -339,6 +362,16 @@ config DEBUG_INFO_BTF_MODULES
help
Generate compact split BTF type information for kernel modules.
+config MODULE_ALLOW_BTF_MISMATCH
+ bool "Allow loading modules with non-matching BTF type info"
+ depends on DEBUG_INFO_BTF_MODULES
+ help
+ For modules whose split BTF does not match vmlinux, load without
+ BTF rather than refusing to load. The default behavior with
+ module BTF enabled is to reject modules with such mismatches;
+ this option will still load module BTF where possible but ignore
+ it when a mismatch is found.
+
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
help
@@ -424,7 +457,8 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
config DEBUG_FORCE_FUNCTION_ALIGN_64B
- bool "Force all function address 64B aligned" if EXPERT
+ bool "Force all function address 64B aligned"
+ depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
help
There are cases that a commit from one domain changes the function
address alignment of other domains, and cause magic performance
@@ -593,20 +627,6 @@ source "lib/Kconfig.kcsan"
endmenu
-config DEBUG_KERNEL
- bool "Kernel debugging"
- help
- Say Y here if you are developing drivers or trying to debug and
- identify kernel problems.
-
-config DEBUG_MISC
- bool "Miscellaneous debug code"
- default DEBUG_KERNEL
- depends on DEBUG_KERNEL
- help
- Say Y here if you need to enable miscellaneous debug code that should
- be under a more specific debug option but isn't.
-
menu "Networking Debugging"
source "net/Kconfig.debug"
@@ -1524,6 +1544,29 @@ config CSD_LOCK_WAIT_DEBUG
include the IPI handler function currently executing (if any)
and relevant stack traces.
+choice
+ prompt "Lock debugging: prove subsystem device_lock() correctness"
+ depends on PROVE_LOCKING
+ help
+ For subsystems that have instrumented their usage of the device_lock()
+ with nested annotations, enable lock dependency checking. The locking
+ hierarchy 'subclass' identifiers are not compatible across
+ subsystems, so only one can be enabled at a time.
+
+config PROVE_NVDIMM_LOCKING
+ bool "NVDIMM"
+ depends on LIBNVDIMM
+ help
+ Enable lockdep to validate nd_device_lock() usage.
+
+config PROVE_CXL_LOCKING
+ bool "CXL"
+ depends on CXL_BUS
+ help
+ Enable lockdep to validate cxl_device_lock() usage.
+
+endchoice
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -2108,6 +2151,18 @@ config KPROBES_SANITY_TEST
Say N if you are unsure.
+config FPROBE_SANITY_TEST
+ bool "Self test for fprobe"
+ depends on DEBUG_KERNEL
+ depends on FPROBE
+ depends on KUNIT=y
+ help
+ This option enables testing of fprobe when the system boots.
+ A series of tests is run to verify that fprobe is functioning
+ properly.
+
+ Say N if you are unsure.
+
config BACKTRACE_SELF_TEST
tristate "Self test for the backtrace code"
depends on DEBUG_KERNEL
@@ -2222,9 +2277,6 @@ config TEST_UUID
config TEST_XARRAY
tristate "Test the XArray code at runtime"
-config TEST_OVERFLOW
- tristate "Test check_*_overflow() functions at runtime"
-
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
help
@@ -2509,6 +2561,30 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
+config OVERFLOW_KUNIT_TEST
+ tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the check_*_overflow(), size_*(), allocation, and
+ related functions.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config STACKINIT_KUNIT_TEST
+ tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Test if the kernel is zero-initializing stack variables and
+ padding. Coverage is controlled by compiler flags,
+ CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
+ CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
+ or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2600,17 +2676,6 @@ config TEST_OBJAGG
Enable this option to test object aggregation manager on boot
(or module load).
-
-config TEST_STACKINIT
- tristate "Test level of stack variable initialization"
- help
- Test if the kernel is zero-initializing stack variables and
- padding. Coverage is controlled by compiler flags,
- CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
- or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
-
- If unsure, say N.
-
config TEST_MEMINIT
tristate "Test heap/page initialization"
help
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 879757b6dd14..1f3e620188a2 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -178,17 +178,17 @@ config KASAN_TAGS_IDENTIFY
memory consumption.
config KASAN_VMALLOC
- bool "Back mappings in vmalloc space with real shadow memory"
- depends on KASAN_GENERIC && HAVE_ARCH_KASAN_VMALLOC
+ bool "Check accesses to vmalloc allocations"
+ depends on HAVE_ARCH_KASAN_VMALLOC
help
- By default, the shadow region for vmalloc space is the read-only
- zero page. This means that KASAN cannot detect errors involving
- vmalloc space.
-
- Enabling this option will hook in to vmap/vmalloc and back those
- mappings with real shadow memory allocated on demand. This allows
- for KASAN to detect more sorts of errors (and to support vmapped
- stacks), but at the cost of higher memory usage.
+ This mode makes KASAN check accesses to vmalloc allocations for
+ validity.
+
+ With software KASAN modes, checking is done for all types of vmalloc
+ allocations. Enabling this option leads to higher memory usage.
+
+ With hardware tag-based KASAN, only VM_ALLOC mappings are checked.
+ There is no additional memory usage.
config KASAN_KUNIT_TEST
tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 63b70b8c5551..de022445fbba 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -10,21 +10,10 @@ config HAVE_KCSAN_COMPILER
For the list of compilers that support KCSAN, please see
<file:Documentation/dev-tools/kcsan.rst>.
-config KCSAN_KCOV_BROKEN
- def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
- depends on CC_IS_CLANG
- depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=thread -fsanitize-coverage=trace-pc)
- help
- Some versions of clang support either KCSAN and KCOV but not the
- combination of the two.
- See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
- in newer releases.
-
menuconfig KCSAN
bool "KCSAN: dynamic data race detector"
depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
depends on DEBUG_KERNEL && !KASAN
- depends on !KCSAN_KCOV_BROKEN
select STACKTRACE
help
The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 912f252a41fc..459dda9ef619 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -45,6 +45,18 @@ config KFENCE_NUM_OBJECTS
pages are required; with one containing the object and two adjacent
ones used as guard pages.
+config KFENCE_DEFERRABLE
+ bool "Use a deferrable timer to trigger allocations"
+ help
+ Use a deferrable timer to trigger allocations. This avoids forcing
+ CPU wake-ups if the system is idle, at the risk of a less predictable
+ sample interval.
+
+ Warning: The KUnit test suite fails with this option enabled, due to
+ the unpredictability of the sample interval!
+
+ Say N if you are unsure.
+
config KFENCE_STATIC_KEYS
bool "Use static keys to set up allocations" if EXPERT
depends on JUMP_LABEL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 236c5cefc4cc..f3c57ed51838 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -27,16 +27,6 @@ config UBSAN_TRAP
the system. For some system builders this is an acceptable
trade-off.
-config UBSAN_KCOV_BROKEN
- def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
- depends on CC_IS_CLANG
- depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=bounds -fsanitize-coverage=trace-pc)
- help
- Some versions of clang support either UBSAN or KCOV but not the
- combination of the two.
- See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
- in newer releases.
-
config CC_HAS_UBSAN_BOUNDS
def_bool $(cc-option,-fsanitize=bounds)
@@ -46,7 +36,6 @@ config CC_HAS_UBSAN_ARRAY_BOUNDS
config UBSAN_BOUNDS
bool "Perform array index bounds checking"
default UBSAN
- depends on !UBSAN_KCOV_BROKEN
depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS
help
This option enables detection of directly indexed out of bounds
@@ -72,7 +61,6 @@ config UBSAN_ARRAY_BOUNDS
config UBSAN_LOCAL_BOUNDS
bool "Perform array local bounds checking"
depends on UBSAN_TRAP
- depends on !UBSAN_KCOV_BROKEN
depends on $(cc-option,-fsanitize=local-bounds)
help
This option enables -fsanitize=local-bounds which traps when an
diff --git a/lib/Makefile b/lib/Makefile
index 300f569c626b..6b9ffc1bd1ee 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -77,7 +77,6 @@ obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
-obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
@@ -94,8 +93,6 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
-CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable)
-obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
@@ -103,6 +100,8 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
#
# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
@@ -175,6 +174,7 @@ obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
+obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
obj-$(CONFIG_XXHASH) += xxhash.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
@@ -363,6 +363,9 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
@@ -374,7 +377,8 @@ TEST_FORTIFY_LOG = test_fortify.log
quiet_cmd_test_fortify = TEST $@
cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \
$< $@ "$(NM)" $(CC) $(c_flags) \
- $(call cc-disable-warning,fortify-source)
+ $(call cc-disable-warning,fortify-source) \
+ -DKBUILD_EXTRA_WARN1
targets += $(TEST_FORTIFY_LOGS)
clean-files += $(TEST_FORTIFY_LOGS)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 926408883456..0d5c2ece0bcb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -492,6 +492,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
* @list: indicates whether the bitmap must be list
* true: print in decimal list format
* false: print in hexadecimal bitmask format
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
*/
static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count)
@@ -512,6 +517,11 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
/**
* bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
*
* The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
* cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
@@ -553,12 +563,6 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
* move to use bin_attribute. In result, we have to pass the corresponding
* parameters such as off, count from bin_attribute show entry to this API.
*
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- *
* The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
* is similar with cpumap_print_to_pagebuf(), the difference is that
* bitmap_print_to_pagebuf() mainly serves sysfs attribute with the assumption
@@ -597,6 +601,11 @@ EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
/**
* bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
*
* Everything is same with the above bitmap_print_bitmask_to_buf() except
* the print format.
@@ -807,7 +816,8 @@ EXPORT_SYMBOL(bitmap_parselist);
/**
- * bitmap_parselist_user()
+ * bitmap_parselist_user() - convert user buffer's list format ASCII
+ * string to bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
diff --git a/lib/crc32.c b/lib/crc32.c
index 2a68dfd3b96c..5649847d0a8d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -194,13 +194,11 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
}
u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -208,6 +206,7 @@ EXPORT_SYMBOL(__crc32c_le);
u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
/*
* This multiplies the polynomials x and y modulo the given modulus.
@@ -332,15 +331,14 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
}
#if CRC_BE_BITS == 1
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
+ return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
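
[Editor's note: the __weak annotations and the new crc32_be_base() alias mirror the existing crc32_le()/__crc32c_le() arrangement, so an architecture can now supply an accelerated crc32_be() and fall back to the generic table-driven code via the _base alias. A minimal sketch of such an override; have_crc_insns() and arch_crc32_be_hw() are hypothetical placeholders for the architecture's actual feature test and hardware path, not real kernel symbols:

	u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
	{
		if (!have_crc_insns())			/* hypothetical feature test */
			return crc32_be_base(crc, p, len); /* generic fallback */
		return arch_crc32_be_hw(crc, p, len);	/* hypothetical HW path */
	}
]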
diff --git a/lib/crc32test.c b/lib/crc32test.c
index 61ddce2cff77..9b4af79412c4 100644
--- a/lib/crc32test.c
+++ b/lib/crc32test.c
@@ -675,7 +675,7 @@ static int __init crc32c_test(void)
/* pre-warm the cache */
for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
+ bytes += test[i].length;
crc ^= __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length);
diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c
new file mode 100644
index 000000000000..fc9ae0da5df7
--- /dev/null
+++ b/lib/crc64-rocksoft.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc64.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <linux/static_key.h>
+#include <linux/notifier.h>
+
+static struct crypto_shash __rcu *crc64_rocksoft_tfm;
+static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
+static DEFINE_MUTEX(crc64_rocksoft_mutex);
+static struct work_struct crc64_rocksoft_rehash_work;
+
+static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crc64_rocksoft_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc64_rocksoft_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
+
+ mutex_lock(&crc64_rocksoft_mutex);
+ old = rcu_dereference_protected(crc64_rocksoft_tfm,
+ lockdep_is_held(&crc64_rocksoft_mutex));
+ new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc64_rocksoft_mutex);
+ return;
+ }
+ rcu_assign_pointer(crc64_rocksoft_tfm, new);
+ mutex_unlock(&crc64_rocksoft_mutex);
+
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crc64_rocksoft_fallback);
+ }
+}
+
+static struct notifier_block crc64_rocksoft_nb = {
+ .notifier_call = crc64_rocksoft_notify,
+};
+
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ u64 crc;
+ } desc;
+ int err;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return crc64_rocksoft_generic(crc, buffer, len);
+
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
+ desc.crc = crc;
+ err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
+ BUG_ON(err);
+
+ return desc.crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
+{
+ return crc64_rocksoft_update(0, buffer, len);
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft);
+
+static int __init crc64_rocksoft_mod_init(void)
+{
+ INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
+ crypto_register_notifier(&crc64_rocksoft_nb);
+ crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
+ return 0;
+}
+
+static void __exit crc64_rocksoft_mod_fini(void)
+{
+ crypto_unregister_notifier(&crc64_rocksoft_nb);
+ cancel_work_sync(&crc64_rocksoft_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
+}
+
+module_init(crc64_rocksoft_mod_init);
+module_exit(crc64_rocksoft_mod_fini);
+
+static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ rcu_read_lock();
+ tfm = rcu_dereference(crc64_rocksoft_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
+}
+
+module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
+
+MODULE_AUTHOR("Keith Busch <kbusch@kernel.org>");
+MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc64");
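
[Editor's note: a minimal consumer sketch for the new API (the split point is illustrative). crc64_rocksoft() computes a one-shot digest; crc64_rocksoft_update() chains by taking the previous result as its seed, as documented for crc64_rocksoft_generic() below:

	#include <linux/types.h>
	#include <linux/crc64.h>

	static void crc64_rocksoft_demo(const u8 *buf, size_t len)
	{
		u64 whole, chained;

		/* One-shot calculation over the full buffer. */
		whole = crc64_rocksoft(buf, len);

		/* Incremental: seed the second call with the first result. */
		chained = crc64_rocksoft_update(0, buf, len / 2);
		chained = crc64_rocksoft_update(chained, buf + len / 2,
						len - len / 2);

		WARN_ON(whole != chained);
	}

Whether the work lands in a registered crypto driver or the generic fallback is transparent to the caller; the static branch flips once a CRC64_ROCKSOFT_STRING shash registers.]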
diff --git a/lib/crc64.c b/lib/crc64.c
index 9f852a89ee2a..61ae8dfb6a1c 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -22,6 +22,13 @@
* x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
* x^7 + x^4 + x + 1
*
+ * crc64rocksoft[256] table is from the Rocksoft specification polynomial
+ * defined as,
+ *
+ * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
+ * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
+ * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1
+ *
* Copyright 2018 SUSE Linux.
* Author: Coly Li <colyli@suse.de>
*/
@@ -55,3 +62,24 @@ u64 __pure crc64_be(u64 crc, const void *p, size_t len)
return crc;
}
EXPORT_SYMBOL_GPL(crc64_be);
+
+/**
+ * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
+{
+ const unsigned char *_p = p;
+ size_t i;
+
+ crc = ~crc;
+
+ for (i = 0; i < len; i++)
+ crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
+
+ return ~crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
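
[Editor's note: contrast this with crc64_be() earlier in this file. crc64_be() is a normal (MSB-first) CRC, while the Rocksoft model is reflected: the table is indexed from the low byte, the register shifts right, and it is inverted on entry and exit. Side by side, the inner loops (the crc64_be line is quoted from the existing body of this file):

	/* normal, MSB-first, as in crc64_be() */
	crc = (crc << 8) ^ crc64table[(crc >> 56) ^ *_p++];

	/* reflected, LSB-first, as in crc64_rocksoft_generic() above */
	crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
]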
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index e8e525650cf2..379a66d7f504 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -123,6 +123,9 @@ config CRYPTO_LIB_CHACHA20POLY1305
config CRYPTO_LIB_SHA256
tristate
+config CRYPTO_LIB_SM3
+ tristate
+
config CRYPTO_LIB_SM4
tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index ed43a41f2dcc..6c872d05d1e6 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -37,6 +37,9 @@ libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
+obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
+libsm3-y := sm3.o
+
obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o
libsm4-y := sm4.o
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 9364f79937b8..c71c09621c09 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -18,14 +18,14 @@
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- __blake2s_update(state, in, inlen, blake2s_compress);
+ __blake2s_update(state, in, inlen, false);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- __blake2s_final(state, out, blake2s_compress);
+ __blake2s_final(state, out, false);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c
new file mode 100644
index 000000000000..d473e358a873
--- /dev/null
+++ b/lib/crypto/sm3.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
+ * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm3.h>
+
+static const u32 ____cacheline_aligned K[64] = {
+ 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
+ 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
+ 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
+ 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
+ 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
+ 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
+ 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
+ 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+};
+
+/*
+ * Transform the message X which consists of 16 32-bit-words. See
+ * GM/T 004-2012 for details.
+ */
+#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
+ do { \
+ ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
+ ss2 = ss1 ^ rol32((a), 12); \
+ d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
+ h += GG ## i(e, f, g) + ss1 + (w1); \
+ b = rol32((b), 9); \
+ f = rol32((f), 19); \
+ h = P0((h)); \
+ } while (0)
+
+#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(1, a, b, c, d, e, f, g, h, t, w1, w2)
+#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(2, a, b, c, d, e, f, g, h, t, w1, w2)
+
+#define FF1(x, y, z) (x ^ y ^ z)
+#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
+
+#define GG1(x, y, z) FF1(x, y, z)
+#define GG2(x, y, z) ((x & y) | (~x & z))
+
+/* Message expansion */
+#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
+#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
+#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
+#define W1(i) (W[i & 0x0f])
+#define W2(i) (W[i & 0x0f] = \
+ P1(W[i & 0x0f] \
+ ^ W[(i-9) & 0x0f] \
+ ^ rol32(W[(i-3) & 0x0f], 15)) \
+ ^ rol32(W[(i-13) & 0x0f], 7) \
+ ^ W[(i-6) & 0x0f])
+
+static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
+{
+ u32 a, b, c, d, e, f, g, h, ss1, ss2;
+
+ a = sctx->state[0];
+ b = sctx->state[1];
+ c = sctx->state[2];
+ d = sctx->state[3];
+ e = sctx->state[4];
+ f = sctx->state[5];
+ g = sctx->state[6];
+ h = sctx->state[7];
+
+ R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
+ R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
+ R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
+ R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
+ R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
+ R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
+ R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
+ R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
+ R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
+ R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
+ R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
+ R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
+ R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
+ R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
+ R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
+ R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
+
+ R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
+ R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
+ R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
+ R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
+ R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
+ R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
+ R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
+ R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
+ R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
+ R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
+ R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
+ R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
+ R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
+ R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
+ R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
+ R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
+
+ R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
+ R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
+ R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
+ R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
+ R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
+ R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
+ R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
+ R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
+ R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
+ R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
+ R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
+ R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
+ R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
+ R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
+ R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
+ R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
+
+ R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
+ R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
+ R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
+ R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
+ R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
+ R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
+ R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
+ R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
+ R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
+ R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
+ R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
+ R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
+ R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
+ R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
+ R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
+ R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
+
+ sctx->state[0] ^= a;
+ sctx->state[1] ^= b;
+ sctx->state[2] ^= c;
+ sctx->state[3] ^= d;
+ sctx->state[4] ^= e;
+ sctx->state[5] ^= f;
+ sctx->state[6] ^= g;
+ sctx->state[7] ^= h;
+}
+#undef R
+#undef R1
+#undef R2
+#undef I
+#undef W1
+#undef W2
+
+static inline void sm3_block(struct sm3_state *sctx,
+ u8 const *data, int blocks, u32 W[16])
+{
+ while (blocks--) {
+ sm3_transform(sctx, data, W);
+ data += SM3_BLOCK_SIZE;
+ }
+}
+
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
+{
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ u32 W[16];
+
+ sctx->count += len;
+
+ if ((partial + len) >= SM3_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SM3_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ sm3_block(sctx, sctx->buffer, 1, W);
+ }
+
+ blocks = len / SM3_BLOCK_SIZE;
+ len %= SM3_BLOCK_SIZE;
+
+ if (blocks) {
+ sm3_block(sctx, data, blocks, W);
+ data += blocks * SM3_BLOCK_SIZE;
+ }
+
+ memzero_explicit(W, sizeof(W));
+
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+}
+EXPORT_SYMBOL_GPL(sm3_update);
+
+void sm3_final(struct sm3_state *sctx, u8 *out)
+{
+ const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ __be32 *digest = (__be32 *)out;
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ u32 W[16];
+ int i;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
+ partial = 0;
+
+ sm3_block(sctx, sctx->buffer, 1, W);
+ }
+
+ memset(sctx->buffer + partial, 0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sm3_block(sctx, sctx->buffer, 1, W);
+
+ for (i = 0; i < 8; i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ /* Zeroize sensitive information. */
+ memzero_explicit(W, sizeof(W));
+ memzero_explicit(sctx, sizeof(*sctx));
+}
+EXPORT_SYMBOL_GPL(sm3_final);
+
+MODULE_DESCRIPTION("Generic SM3 library");
+MODULE_LICENSE("GPL v2");
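
[Editor's note: a minimal usage sketch for the new library interface. The initial state words are the SM3 IV from GM/T 0004-2012; an sm3_init()-style helper in <crypto/sm3.h> is expected to install them, but they are shown inline here to keep the sketch self-contained:

	#include <crypto/sm3.h>

	static void sm3_digest_demo(const u8 *data, unsigned int len,
				    u8 digest[SM3_DIGEST_SIZE])
	{
		struct sm3_state sctx = {
			.state = {
				0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
				0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,
			},
		};

		sm3_update(&sctx, data, len);
		sm3_final(&sctx, digest);	/* also wipes the state */
	}
]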
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 094b43aef8db..55e222acd0b8 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -17,10 +17,30 @@
#include <stdio.h>
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL
static uint64_t crc64_table[256] = {0};
+static uint64_t crc64_rocksoft_table[256] = {0};
-static void generate_crc64_table(void)
+static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0ULL;
+ c = i;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ (c >> j)) & 1)
+ crc = (crc >> 1) ^ poly;
+ else
+ crc >>= 1;
+ }
+ table[i] = crc;
+ }
+}
+
+static void generate_crc64_table(uint64_t table[256], uint64_t poly)
{
uint64_t i, j, c, crc;
@@ -30,26 +50,22 @@ static void generate_crc64_table(void)
for (j = 0; j < 8; j++) {
if ((crc ^ c) & 0x8000000000000000ULL)
- crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+ crc = (crc << 1) ^ poly;
else
crc <<= 1;
c <<= 1;
}
- crc64_table[i] = crc;
+ table[i] = crc;
}
}
-static void print_crc64_table(void)
+static void output_table(uint64_t table[256])
{
int i;
- printf("/* this file is generated - do not edit */\n\n");
- printf("#include <linux/types.h>\n");
- printf("#include <linux/cache.h>\n\n");
- printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
for (i = 0; i < 256; i++) {
- printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+ printf("\t0x%016" PRIx64 "ULL", table[i]);
if (i & 0x1)
printf(",\n");
else
@@ -58,9 +74,22 @@ static void print_crc64_table(void)
printf("};\n");
}
+static void print_crc64_tables(void)
+{
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ output_table(crc64_table);
+
+ printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
+ output_table(crc64_rocksoft_table);
+}
+
int main(int argc, char *argv[])
{
- generate_crc64_table();
- print_crc64_table();
+ generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
+ generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+ print_crc64_tables();
return 0;
}
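
[Editor's note: the reflected generator clocks each index byte through the polynomial LSB-first, which is exactly the orientation the right-shifting loop in crc64_rocksoft_generic() consumes. As a sanity check, every entry should equal a plain bit-at-a-time reflected CRC of its index byte; a sketch of that check, where crc64_bitwise_reflected() is illustrative and not part of this file:

	static uint64_t crc64_bitwise_reflected(unsigned char byte, uint64_t poly)
	{
		uint64_t crc = byte;
		int j;

		for (j = 0; j < 8; j++)
			crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;
		return crc;
	}

	/* after generation, for all i in 0..255:
	 *   crc64_rocksoft_table[i] == crc64_bitwise_reflected(i, CRC64_ROCKSOFT_POLY)
	 */
]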
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 9301578f98e8..06833d404398 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
*/
-int hex_to_bin(char ch)
+int hex_to_bin(unsigned char ch)
{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- ch = tolower(ch);
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- return -1;
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}
EXPORT_SYMBOL(hex_to_bin);
@@ -45,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- int hi = hex_to_bin(*src++);
- int lo = hex_to_bin(*src++);
+ int hi, lo;
- if ((hi < 0) || (lo < 0))
+ hi = hex_to_bin(*src++);
+ if (unlikely(hi < 0))
+ return -EINVAL;
+ lo = hex_to_bin(*src++);
+ if (unlikely(lo < 0))
return -EINVAL;
*dst++ = (hi << 4) | lo;
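
[Editor's note: to see the branchless arithmetic at work, a small self-check sketch (not part of the file) can compare the constant-time decoder against the obvious branchy one over every possible byte value; hex_to_bin_branchy() is an illustrative reference, not a kernel symbol:

	static int hex_to_bin_branchy(unsigned char ch)
	{
		if (ch >= '0' && ch <= '9')
			return ch - '0';
		if ((ch | 0x20) >= 'a' && (ch | 0x20) <= 'f')
			return (ch | 0x20) - 'a' + 10;
		return -1;
	}

	static void hex_to_bin_selftest(void)
	{
		int ch;

		/* both decoders must agree for all 256 inputs */
		for (ch = 0; ch < 256; ch++)
			WARN_ON(hex_to_bin(ch) != hex_to_bin_branchy(ch));
	}

The reference decoder branches on the input value, which is precisely what the constant-time version avoids.]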
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b0e0acdf96c1..6dd5330f7a99 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -414,6 +414,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
return 0;
buf->ops = &page_cache_pipe_buf_ops;
+ buf->flags = 0;
get_page(page);
buf->page = page;
buf->offset = offset;
@@ -577,6 +578,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
break;
buf->ops = &default_pipe_buf_ops;
+ buf->flags = 0;
buf->page = page;
buf->offset = 0;
buf->len = min_t(ssize_t, left, PAGE_SIZE);
diff --git a/lib/kobject.c b/lib/kobject.c
index 56fa037501b5..5f0e71ab292c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -54,32 +54,6 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
kobj->ktype->get_ownership(kobj, uid, gid);
}
-/*
- * populate_dir - populate directory with attributes.
- * @kobj: object we're working on.
- *
- * Most subsystems have a set of default attributes that are associated
- * with an object that registers with them. This is a helper called during
- * object registration that loops through the default attributes of the
- * subsystem and creates attributes files for them in sysfs.
- */
-static int populate_dir(struct kobject *kobj)
-{
- const struct kobj_type *t = get_ktype(kobj);
- struct attribute *attr;
- int error = 0;
- int i;
-
- if (t && t->default_attrs) {
- for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
- error = sysfs_create_file(kobj, attr);
- if (error)
- break;
- }
- }
- return error;
-}
-
static int create_dir(struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
@@ -90,12 +64,6 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- error = populate_dir(kobj);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
- }
-
if (ktype) {
error = sysfs_create_groups(kobj, ktype->default_groups);
if (error) {
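
[Editor's note: with populate_dir() gone, create_dir() installs attributes only through ktype->default_groups, so remaining ->default_attrs users are expected to convert. A sketch of that conversion with illustrative "foo" names; ATTRIBUTE_GROUPS() from <linux/sysfs.h> generates foo_groups from the foo_attrs array:

	/* before: a NULL-terminated array wired up via ->default_attrs */
	static struct attribute *foo_attrs[] = {
		&foo_bar_attr.attr,
		NULL,
	};

	/* after: wrap it and point the ktype at the generated group list,
	 * which create_dir() installs with sysfs_create_groups() */
	ATTRIBUTE_GROUPS(foo);

	static struct kobj_type foo_ktype = {
		.sysfs_ops	= &kobj_sysfs_ops,
		.default_groups	= foo_groups,
	};
]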
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index b972bda61c0c..d00d6d181ee8 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -10,12 +10,13 @@
#include "string-stream.h"
-void kunit_base_assert_format(const struct kunit_assert *assert,
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
struct string_stream *stream)
{
const char *expect_or_assert = NULL;
- switch (assert->type) {
+ switch (type) {
case KUNIT_EXPECTATION:
expect_or_assert = "EXPECTATION";
break;
@@ -25,34 +26,33 @@ void kunit_base_assert_format(const struct kunit_assert *assert,
}
string_stream_add(stream, "%s FAILED at %s:%d\n",
- expect_or_assert, assert->file, assert->line);
+ expect_or_assert, loc->file, loc->line);
}
-EXPORT_SYMBOL_GPL(kunit_base_assert_format);
+EXPORT_SYMBOL_GPL(kunit_assert_prologue);
-void kunit_assert_print_msg(const struct kunit_assert *assert,
- struct string_stream *stream)
+static void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream)
{
- if (assert->message.fmt)
- string_stream_add(stream, "\n%pV", &assert->message);
+ if (message->fmt)
+ string_stream_add(stream, "\n%pV", message);
}
-EXPORT_SYMBOL_GPL(kunit_assert_print_msg);
void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
- kunit_base_assert_format(assert, stream);
- string_stream_add(stream, "%pV", &assert->message);
+ string_stream_add(stream, "%pV", message);
}
EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
struct kunit_unary_assert *unary_assert;
unary_assert = container_of(assert, struct kunit_unary_assert, assert);
- kunit_base_assert_format(assert, stream);
if (unary_assert->expected_true)
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
@@ -61,11 +61,12 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
unary_assert->condition);
- kunit_assert_print_msg(assert, stream);
+ kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
struct kunit_ptr_not_err_assert *ptr_assert;
@@ -73,7 +74,6 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert,
assert);
- kunit_base_assert_format(assert, stream);
if (!ptr_assert->value) {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
@@ -84,7 +84,7 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
ptr_assert->text,
PTR_ERR(ptr_assert->value));
}
- kunit_assert_print_msg(assert, stream);
+ kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
@@ -112,6 +112,7 @@ static bool is_literal(struct kunit *test, const char *text, long long value,
}
void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_assert *binary_assert;
@@ -119,27 +120,27 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert = container_of(assert, struct kunit_binary_assert,
assert);
- kunit_base_assert_format(assert, stream);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- if (!is_literal(stream->test, binary_assert->left_text,
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_literal(stream->test, binary_assert->text->left_text,
binary_assert->left_value, stream->gfp))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
- binary_assert->left_text,
+ binary_assert->text->left_text,
binary_assert->left_value);
- if (!is_literal(stream->test, binary_assert->right_text,
+ if (!is_literal(stream->test, binary_assert->text->right_text,
binary_assert->right_value, stream->gfp))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
- binary_assert->right_text,
+ binary_assert->text->right_text,
binary_assert->right_value);
- kunit_assert_print_msg(assert, stream);
+ kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_ptr_assert *binary_assert;
@@ -147,19 +148,18 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
binary_assert = container_of(assert, struct kunit_binary_ptr_assert,
assert);
- kunit_base_assert_format(assert, stream);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
- binary_assert->left_text,
+ binary_assert->text->left_text,
binary_assert->left_value);
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
- binary_assert->right_text,
+ binary_assert->text->right_text,
binary_assert->right_value);
- kunit_assert_print_msg(assert, stream);
+ kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
@@ -180,6 +180,7 @@ static bool is_str_literal(const char *text, const char *value)
}
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_str_assert *binary_assert;
@@ -187,20 +188,19 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
binary_assert = container_of(assert, struct kunit_binary_str_assert,
assert);
- kunit_base_assert_format(assert, stream);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- if (!is_str_literal(binary_assert->left_text, binary_assert->left_value))
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_str_literal(binary_assert->text->left_text, binary_assert->left_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n",
- binary_assert->left_text,
+ binary_assert->text->left_text,
binary_assert->left_value);
- if (!is_str_literal(binary_assert->right_text, binary_assert->right_value))
+ if (!is_str_literal(binary_assert->text->right_text, binary_assert->right_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"",
- binary_assert->right_text,
+ binary_assert->text->right_text,
binary_assert->right_value);
- kunit_assert_print_msg(assert, stream);
+ kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
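
[Editor's note: the net effect of this refactor is that the ->format callbacks no longer print the "EXPECTATION FAILED at file:line" prologue (kunit_fail() now emits it via kunit_assert_prologue()) and receive the user's message as a separate va_format instead of reading it out of the assert struct. A sketch of a custom assert class under the new callback signature; the my_* names are illustrative:

	struct my_assert {
		struct kunit_assert assert;
		const char *what;
	};

	static void my_assert_format(const struct kunit_assert *assert,
				     const struct va_format *message,
				     struct string_stream *stream)
	{
		const struct my_assert *ma =
			container_of(assert, struct my_assert, assert);

		/* no prologue here; the caller already printed it */
		string_stream_add(stream, KUNIT_SUBTEST_INDENT "%s failed\n",
				  ma->what);
		if (message->fmt)
			string_stream_add(stream, "\n%pV", message);
	}
]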
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 51099b0ca29c..4bbf37c04eba 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -69,6 +69,47 @@ static void example_mark_skipped_test(struct kunit *test)
/* This line should run */
kunit_info(test, "You should see this line.");
}
+
+/*
+ * This test shows off all the types of KUNIT_EXPECT macros.
+ */
+static void example_all_expect_macros_test(struct kunit *test)
+{
+ /* Boolean assertions */
+ KUNIT_EXPECT_TRUE(test, true);
+ KUNIT_EXPECT_FALSE(test, false);
+
+ /* Integer assertions */
+ KUNIT_EXPECT_EQ(test, 1, 1); /* check == */
+ KUNIT_EXPECT_GE(test, 1, 1); /* check >= */
+ KUNIT_EXPECT_LE(test, 1, 1); /* check <= */
+ KUNIT_EXPECT_NE(test, 1, 0); /* check != */
+ KUNIT_EXPECT_GT(test, 1, 0); /* check > */
+ KUNIT_EXPECT_LT(test, 0, 1); /* check < */
+
+ /* Pointer assertions */
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
+ KUNIT_EXPECT_PTR_NE(test, test, NULL);
+
+ /* String assertions */
+ KUNIT_EXPECT_STREQ(test, "hi", "hi");
+ KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
+
+ /*
+ * There are also ASSERT variants of all of the above that abort test
+ * execution if they fail. Useful for memory allocations, etc.
+ */
+ KUNIT_ASSERT_GT(test, sizeof(char), 0);
+
+ /*
+ * There are also _MSG variants of all of the above that let you include
+ * additional text on failure.
+ */
+ KUNIT_EXPECT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+ KUNIT_ASSERT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+}
+
/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
@@ -83,6 +124,7 @@ static struct kunit_case example_test_cases[] = {
KUNIT_CASE(example_simple_test),
KUNIT_CASE(example_skip_test),
KUNIT_CASE(example_mark_skipped_test),
+ KUNIT_CASE(example_all_expect_macros_test),
{}
};
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c7ed4aabec04..3bca3bf5c15b 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -240,7 +240,9 @@ static void kunit_print_string_stream(struct kunit *test,
}
}
-static void kunit_fail(struct kunit *test, struct kunit_assert *assert)
+static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
+ enum kunit_assert_type type, struct kunit_assert *assert,
+ const struct va_format *message)
{
struct string_stream *stream;
@@ -250,12 +252,13 @@ static void kunit_fail(struct kunit *test, struct kunit_assert *assert)
if (!stream) {
WARN(true,
"Could not allocate stream to print failed assertion in %s:%d\n",
- assert->file,
- assert->line);
+ loc->file,
+ loc->line);
return;
}
- assert->format(assert, stream);
+ kunit_assert_prologue(loc, type, stream);
+ assert->format(assert, message, stream);
kunit_print_string_stream(test, stream);
@@ -275,29 +278,27 @@ static void __noreturn kunit_abort(struct kunit *test)
WARN_ONCE(true, "Throw could not abort from test!\n");
}
-void kunit_do_assertion(struct kunit *test,
- struct kunit_assert *assert,
- bool pass,
- const char *fmt, ...)
+void kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct kunit_assert *assert,
+ const char *fmt, ...)
{
va_list args;
-
- if (pass)
- return;
-
+ struct va_format message;
va_start(args, fmt);
- assert->message.fmt = fmt;
- assert->message.va = &args;
+ message.fmt = fmt;
+ message.va = &args;
- kunit_fail(test, assert);
+ kunit_fail(test, loc, type, assert, &message);
va_end(args);
- if (assert->type == KUNIT_ASSERTION)
+ if (type == KUNIT_ASSERTION)
kunit_abort(test);
}
-EXPORT_SYMBOL_GPL(kunit_do_assertion);
+EXPORT_SYMBOL_GPL(kunit_do_failed_assertion);
void kunit_init_test(struct kunit *test, const char *name, char *log)
{
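
[Editor's note: because the early `if (pass) return;` is gone, the pass/fail test now lives on the caller side: the assertion macros check the condition and only construct the assert and call kunit_do_failed_assertion() on failure. Roughly, a simplified sketch of the macro shape (the real one is KUNIT_ASSERTION in <kunit/test.h>):

	#define MY_KUNIT_CHECK(test, type, pass, assert_ptr, fmt, ...)	\
	do {								\
		if (unlikely(!(pass))) {				\
			static const struct kunit_loc __loc =		\
				KUNIT_CURRENT_LOC;			\
			kunit_do_failed_assertion(test, &__loc, type,	\
						  assert_ptr, fmt,	\
						  ##__VA_ARGS__);	\
		}							\
	} while (0)

This keeps the file/line capture and struct initialization out of the passing fast path.]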
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index be38a2c5ecc2..f7825991d576 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -52,7 +52,7 @@ static unsigned long kunit_test_timeout(void)
* If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
* the task will be killed and an oops generated.
*/
- return 300 * MSEC_PER_SEC; /* 5 min */
+ return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
}
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
@@ -78,6 +78,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
if (time_remaining == 0) {
kunit_err(test, "try timed out\n");
try_catch->try_result = -ETIMEDOUT;
+ kthread_stop(task_struct);
}
exit_code = try_catch->try_result;
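
Note: two distinct fixes here. The timeout one is a unit bug: kunit_test_timeout() feeds wait_for_completion_timeout(), which takes jiffies, but the old code returned 300 * MSEC_PER_SEC = 300000, a raw millisecond count. That equals 5 minutes only when HZ=1000; at HZ=100, 300000 jiffies is 3000 seconds, i.e. 50 minutes. 300 * msecs_to_jiffies(MSEC_PER_SEC) is 300 * HZ jiffies, which is 300 seconds for any HZ. The kthread_stop() addition then ensures a timed-out test thread is actually reaped rather than lingering after the -ETIMEDOUT report.
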
diff --git a/lib/list-test.c b/lib/list-test.c
index ee09505df16f..035ef6597640 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -161,6 +161,26 @@ static void list_test_list_del_init(struct kunit *test)
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
}
+static void list_test_list_del_init_careful(struct kunit *test)
+{
+ /* NOTE: This test only checks the behaviour of this function in
+ * isolation. It does not verify memory model guarantees.
+ */
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init_careful(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
static void list_test_list_move(struct kunit *test)
{
struct list_head a, b;
@@ -234,6 +254,24 @@ static void list_test_list_bulk_move_tail(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, 2);
}
+static void list_test_list_is_head(struct kunit *test)
+{
+ struct list_head a, b, c;
+
+ /* Two lists: [a] -> b, [c] */
+ INIT_LIST_HEAD(&a);
+ INIT_LIST_HEAD(&c);
+ list_add_tail(&b, &a);
+
+ KUNIT_EXPECT_TRUE_MSG(test, list_is_head(&a, &a),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &b),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &c),
+ "Head element of different list");
+}
+
+
static void list_test_list_is_first(struct kunit *test)
{
struct list_head a, b;
@@ -511,6 +549,26 @@ static void list_test_list_entry(struct kunit *test)
struct list_test_struct, list));
}
+static void list_test_list_entry_is_head(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2, test_struct3;
+
+ INIT_LIST_HEAD(&test_struct1.list);
+ INIT_LIST_HEAD(&test_struct3.list);
+
+ list_add_tail(&test_struct2.list, &test_struct1.list);
+
+ KUNIT_EXPECT_TRUE_MSG(test,
+ list_entry_is_head((&test_struct1), &test_struct1.list, list),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct2), &test_struct1.list, list),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct3), &test_struct1.list, list),
+ "Head element of different list");
+}
+
static void list_test_list_first_entry(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
@@ -707,9 +765,11 @@ static struct kunit_case list_test_cases[] = {
KUNIT_CASE(list_test_list_replace_init),
KUNIT_CASE(list_test_list_swap),
KUNIT_CASE(list_test_list_del_init),
+ KUNIT_CASE(list_test_list_del_init_careful),
KUNIT_CASE(list_test_list_move),
KUNIT_CASE(list_test_list_move_tail),
KUNIT_CASE(list_test_list_bulk_move_tail),
+ KUNIT_CASE(list_test_list_is_head),
KUNIT_CASE(list_test_list_is_first),
KUNIT_CASE(list_test_list_is_last),
KUNIT_CASE(list_test_list_empty),
@@ -724,6 +784,7 @@ static struct kunit_case list_test_cases[] = {
KUNIT_CASE(list_test_list_splice_init),
KUNIT_CASE(list_test_list_splice_tail_init),
KUNIT_CASE(list_test_list_entry),
+ KUNIT_CASE(list_test_list_entry_is_head),
KUNIT_CASE(list_test_list_first_entry),
KUNIT_CASE(list_test_list_last_entry),
KUNIT_CASE(list_test_list_first_entry_or_null),
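
Note: the list_is_head() cases pin down pure pointer identity: an entry on the same list is not its head, and neither is another list's head. The helper under test is expected to be nothing more than (a sketch; the real definition is in include/linux/list.h):

	static inline int list_is_head(const struct list_head *list,
				       const struct list_head *head)
	{
		/* identity check only: true iff 'list' *is* the head */
		return list == head;
	}

list_entry_is_head() is the container_of-level counterpart, used as the termination check in list_for_each_entry()-style loops.
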
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
index 8c3365f26e51..b247d412ddef 100644
--- a/lib/logic_iomem.c
+++ b/lib/logic_iomem.c
@@ -68,7 +68,7 @@ int logic_iomem_add_region(struct resource *resource,
}
EXPORT_SYMBOL(logic_iomem_add_region);
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
{
WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
@@ -81,7 +81,7 @@ static void real_iounmap(volatile void __iomem *addr)
WARN(1, "invalid iounmap for addr 0x%llx\n",
(unsigned long long)(uintptr_t __force)addr);
}
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
void __iomem *ioremap(phys_addr_t offset, size_t size)
{
@@ -168,7 +168,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
#define MAKE_FALLBACK(op, sz) \
static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
{ \
@@ -213,7 +213,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
(unsigned long long)(uintptr_t __force)addr);
}
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
#define MAKE_OP(op, sz) \
u##sz __raw_read ## op(const volatile void __iomem *addr) \
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 926f4823d5ea..fd1728d94bab 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic(
ip += length;
op += length;
- /* Necessarily EOF, due to parsing restrictions */
- if (!partialDecoding || (cpy == oend))
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+		 * can't proceed with reading the offset of the following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
break;
} else {
/* may overwrite up to WILDCOPYLENGTH beyond cpy */
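
Note: the new `ip >= (iend - 2)` guard encodes a format fact. In an LZ4 sequence, each match is introduced by a 2-byte little-endian offset, consumed roughly as (a sketch of the surrounding decoder, not part of this hunk):

	offset = LZ4_readLE16(ip);	/* consumes 2 input bytes */
	ip += 2;
	match = op - offset;

With fewer than two input bytes remaining there is no further match to decode, so a partial decode can legitimately stop there instead of tripping the safety checks that follow.
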
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 142b680835df..070ba784c9f1 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -242,6 +242,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
}
MPN_NORMALIZE(x->d, x->nlimbs);
}
+EXPORT_SYMBOL_GPL(mpi_rshift);
/****************
* Shift A by COUNT limbs to the left
diff --git a/lib/test_overflow.c b/lib/overflow_kunit.c
index 7a4b6f6c5473..475f0c064bf6 100644
--- a/lib/test_overflow.c
+++ b/lib/overflow_kunit.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Test cases for arithmetic overflow checks.
+ * Test cases for arithmetic overflow checks. See:
+ * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
+ * ./tools/testing/kunit/kunit.py run overflow [--raw_output]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/test.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -19,7 +21,7 @@
t a, b; \
t sum, diff, prod; \
bool s_of, d_of, p_of; \
- } t ## _tests[] __initconst
+ } t ## _tests[]
DEFINE_TEST_ARRAY(u8) = {
{0, 0, 0, 0, 0, false, false, false},
@@ -220,43 +222,31 @@ DEFINE_TEST_ARRAY(s64) = {
bool _of; \
\
_of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
- pr_warn("expected "fmt" "sym" "fmt \
- " to%s overflow (type %s)\n", \
- a, b, of ? "" : " not", #t); \
- err = 1; \
- } \
- if (_r != r) { \
- pr_warn("expected "fmt" "sym" "fmt" == " \
- fmt", got "fmt" (type %s)\n", \
- a, b, r, _r, #t); \
- err = 1; \
- } \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+ "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
} while (0)
#define DEFINE_TEST_FUNC(t, fmt) \
-static int __init do_test_ ## t(const struct test_ ## t *p) \
+static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
{ \
- int err = 0; \
- \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
- \
- return err; \
} \
\
-static int __init test_ ## t ## _overflow(void) { \
- int err = 0; \
+static void t ## _overflow_test(struct kunit *test) { \
unsigned i; \
\
- pr_info("%-3s: %zu arithmetic tests\n", #t, \
- ARRAY_SIZE(t ## _tests)); \
for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
- err |= do_test_ ## t(&t ## _tests[i]); \
- return err; \
+ do_test_ ## t(test, &t ## _tests[i]); \
+ kunit_info(test, "%zu %s arithmetic tests finished\n", \
+ ARRAY_SIZE(t ## _tests), #t); \
}
DEFINE_TEST_FUNC(u8, "%d");
@@ -270,199 +260,176 @@ DEFINE_TEST_FUNC(u64, "%llu");
DEFINE_TEST_FUNC(s64, "%lld");
#endif
-static int __init test_overflow_calculation(void)
-{
- int err = 0;
-
- err |= test_u8_overflow();
- err |= test_s8_overflow();
- err |= test_u16_overflow();
- err |= test_s16_overflow();
- err |= test_u32_overflow();
- err |= test_s32_overflow();
-#if BITS_PER_LONG == 64
- err |= test_u64_overflow();
- err |= test_s64_overflow();
-#endif
-
- return err;
-}
-
-static int __init test_overflow_shift(void)
+static void overflow_shift_test(struct kunit *test)
{
- int err = 0;
+ int count = 0;
/* Args are: value, shift, type, expected result, overflow expected */
-#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
- int __failed = 0; \
+#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
typeof(a) __a = (a); \
typeof(s) __s = (s); \
t __e = (expect); \
t __d; \
bool __of = check_shl_overflow(__a, __s, &__d); \
if (__of != of) { \
- pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected (%s)(%s << %s) to%s overflow\n", \
#t, #a, #s, of ? "" : " not"); \
- __failed = 1; \
} else if (!__of && __d != __e) { \
- pr_warn("expected (%s)(%s << %s) == %s\n", \
+ KUNIT_EXPECT_EQ_MSG(test, __d, __e, \
+ "expected (%s)(%s << %s) == %s\n", \
#t, #a, #s, #expect); \
if ((t)-1 < 0) \
- pr_warn("got %lld\n", (s64)__d); \
+ kunit_info(test, "got %lld\n", (s64)__d); \
else \
- pr_warn("got %llu\n", (u64)__d); \
- __failed = 1; \
+ kunit_info(test, "got %llu\n", (u64)__d); \
} \
- if (!__failed) \
- pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
- of ? "overflow" : #expect); \
- __failed; \
-})
+ count++; \
+} while (0)
/* Sane shifts. */
- err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
- err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
- err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
- err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
- err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
- err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
- err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
- err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
- err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
- err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
- err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
- err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
- err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
- err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
- err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
- err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
- err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
- err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
- err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
- err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
- err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
- 0xFFFFFFFFULL << 32, false);
+ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false);
/* Sane shift: start and end with 0, without a too-wide shift. */
- err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
- err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
- err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
+ TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ TEST_ONE_SHIFT(0, 63, u64, 0, false);
/* Sane shift: start and end with 0, without reaching signed bit. */
- err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
- err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
- err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
- err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
- err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
+ TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ TEST_ONE_SHIFT(0, 30, int, 0, false);
+ TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ TEST_ONE_SHIFT(0, 62, s64, 0, false);
/* Overflow: shifted the bit off the end. */
- err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
- err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
- err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
- err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
+ TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ TEST_ONE_SHIFT(1, 64, u64, 0, true);
/* Overflow: shifted into the signed bit. */
- err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
- err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
- err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
- err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
- err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
+ TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ TEST_ONE_SHIFT(1, 31, int, 0, true);
+ TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ TEST_ONE_SHIFT(1, 63, s64, 0, true);
/* Overflow: high bit falls off unsigned types. */
/* 10010110 */
- err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ TEST_ONE_SHIFT(150, 1, u8, 0, true);
/* 1000100010010110 */
- err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ TEST_ONE_SHIFT(34966, 1, u16, 0, true);
/* 10000100000010001000100010010110 */
- err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
- err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
/* 1000001000010000010000000100000010000100000010001000100010010110 */
- err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+ TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
/* Overflow: bit shifted into signed bit on signed types. */
/* 01001011 */
- err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ TEST_ONE_SHIFT(75, 1, s8, 0, true);
/* 0100010001001011 */
- err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ TEST_ONE_SHIFT(17483, 1, s16, 0, true);
/* 01000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
- err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+ TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
/* Overflow: bit shifted past signed bit on signed types. */
/* 01001011 */
- err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ TEST_ONE_SHIFT(75, 2, s8, 0, true);
/* 0100010001001011 */
- err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ TEST_ONE_SHIFT(17483, 2, s16, 0, true);
/* 01000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
- err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
/* Overflow: values larger than destination type. */
- err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
- err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
- err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
- err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
- err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+ TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
/* Nonsense: negative initial value. */
- err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
- err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
- err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
- err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
- err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
- err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
- err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
- err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
- err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+ TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
/* Nonsense: negative shift values. */
- err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
- err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
- err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
+ TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ TEST_ONE_SHIFT(0, -15, int, 0, true);
+ TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ TEST_ONE_SHIFT(0, -30, u64, 0, true);
/* Overflow: shifted at or beyond entire type's bit width. */
- err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
- err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
- err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
- err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
/*
* Corner case: for unsigned types, we fail when we've shifted
@@ -473,13 +440,14 @@ static int __init test_overflow_shift(void)
* signed bit). So, for now, we will test this condition but
* mark it as not expected to overflow.
*/
- err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
- err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
- err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
-
- return err;
+ TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ TEST_ONE_SHIFT(0, 31, int, 0, false);
+ TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ kunit_info(test, "%d shift tests finished\n", count);
+#undef TEST_ONE_SHIFT
}
/*
@@ -499,7 +467,7 @@ static int __init test_overflow_shift(void)
#define TEST_SIZE (5 * 4096)
#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
-static int __init test_ ## func (void *arg) \
+static void test_ ## func (struct kunit *test, void *arg) \
{ \
volatile size_t a = TEST_SIZE; \
volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
@@ -507,31 +475,24 @@ static int __init test_ ## func (void *arg) \
\
/* Tiny allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
- if (!ptr) { \
- pr_warn(#func " failed regular allocation?!\n"); \
- return 1; \
- } \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " failed regular allocation?!\n"); \
free ## want_arg (free_func, arg, ptr); \
\
/* Wrapped allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
a * b); \
- if (!ptr) { \
- pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \
- return 1; \
- } \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " unexpectedly failed bad wrapping?!\n"); \
free ## want_arg (free_func, arg, ptr); \
\
/* Saturated allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
array_size(a, b)); \
if (ptr) { \
- pr_warn(#func " missed saturation!\n"); \
+ KUNIT_FAIL(test, #func " missed saturation!\n"); \
free ## want_arg (free_func, arg, ptr); \
- return 1; \
} \
- pr_info(#func " detected saturation\n"); \
- return 0; \
}
/*
@@ -544,10 +505,7 @@ DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
-DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0);
-DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1);
-DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0);
-DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1);
+DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
@@ -555,60 +513,158 @@ DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
-static int __init test_overflow_allocation(void)
+static void overflow_allocation_test(struct kunit *test)
{
const char device_name[] = "overflow-test";
struct device *dev;
- int err = 0;
+ int count = 0;
+
+#define check_allocation_overflow(alloc) do { \
+ count++; \
+ test_ ## alloc(test, dev); \
+} while (0)
/* Create dummy device for devm_kmalloc()-family tests. */
dev = root_device_register(device_name);
- if (IS_ERR(dev)) {
- pr_warn("Cannot register test device\n");
- return 1;
- }
-
- err |= test_kmalloc(NULL);
- err |= test_kmalloc_node(NULL);
- err |= test_kzalloc(NULL);
- err |= test_kzalloc_node(NULL);
- err |= test_kvmalloc(NULL);
- err |= test_kvmalloc_node(NULL);
- err |= test_kvzalloc(NULL);
- err |= test_kvzalloc_node(NULL);
- err |= test_vmalloc(NULL);
- err |= test_vmalloc_node(NULL);
- err |= test_vzalloc(NULL);
- err |= test_vzalloc_node(NULL);
- err |= test_devm_kmalloc(dev);
- err |= test_devm_kzalloc(dev);
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
+ "Cannot register test device\n");
+
+ check_allocation_overflow(kmalloc);
+ check_allocation_overflow(kmalloc_node);
+ check_allocation_overflow(kzalloc);
+ check_allocation_overflow(kzalloc_node);
+ check_allocation_overflow(__vmalloc);
+ check_allocation_overflow(kvmalloc);
+ check_allocation_overflow(kvmalloc_node);
+ check_allocation_overflow(kvzalloc);
+ check_allocation_overflow(kvzalloc_node);
+ check_allocation_overflow(devm_kmalloc);
+ check_allocation_overflow(devm_kzalloc);
device_unregister(dev);
- return err;
+ kunit_info(test, "%d allocation overflow tests finished\n", count);
+#undef check_allocation_overflow
}
-static int __init test_module_init(void)
+struct __test_flex_array {
+ unsigned long flags;
+ size_t count;
+ unsigned long data[];
+};
+
+static void overflow_size_helpers_test(struct kunit *test)
{
- int err = 0;
+ /* Make sure struct_size() can be used in a constant expression. */
+ u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)];
+ struct __test_flex_array *obj;
+ int count = 0;
+ int var;
+ volatile int unconst = 0;
+
+ /* Verify constant expression against runtime version. */
+ var = 55;
+ OPTIMIZER_HIDE_VAR(var);
+ KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var));
+
+#define check_one_size_helper(expected, func, args...) do { \
+ size_t _r = func(args); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, expected, \
+ "expected " #func "(" #args ") to return %zu but got %zu instead\n", \
+ (size_t)(expected), _r); \
+ count++; \
+} while (0)
- err |= test_overflow_calculation();
- err |= test_overflow_shift();
- err |= test_overflow_allocation();
+ var = 4;
+ check_one_size_helper(20, size_mul, var++, 5);
+ check_one_size_helper(20, size_mul, 4, var++);
+ check_one_size_helper(0, size_mul, 0, 3);
+ check_one_size_helper(0, size_mul, 3, 0);
+ check_one_size_helper(6, size_mul, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(9, size_add, var++, 5);
+ check_one_size_helper(9, size_add, 4, var++);
+ check_one_size_helper(9, size_add, 9, 0);
+ check_one_size_helper(9, size_add, 0, 9);
+ check_one_size_helper(5, size_add, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(1, size_sub, var--, 3);
+ check_one_size_helper(1, size_sub, 4, var--);
+ check_one_size_helper(1, size_sub, 3, 2);
+ check_one_size_helper(9, size_sub, 9, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
+ check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1);
+ check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3);
+ check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3);
+
+ var = 4;
+ check_one_size_helper(4 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(5 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj->data),
+ flex_array_size, obj, data, 1 + unconst);
+ check_one_size_helper(7 * sizeof(*obj->data),
+ flex_array_size, obj, data, 7 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, -1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, SIZE_MAX - 4 + unconst);
+
+ var = 4;
+ check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
+ struct_size, obj, data, 1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, -3 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, SIZE_MAX - 3 + unconst);
+
+ kunit_info(test, "%d overflow size helper tests finished\n", count);
+#undef check_one_size_helper
+}
- if (err) {
- pr_warn("FAIL!\n");
- err = -EINVAL;
- } else {
- pr_info("all tests passed\n");
- }
+static struct kunit_case overflow_test_cases[] = {
+ KUNIT_CASE(u8_overflow_test),
+ KUNIT_CASE(s8_overflow_test),
+ KUNIT_CASE(u16_overflow_test),
+ KUNIT_CASE(s16_overflow_test),
+ KUNIT_CASE(u32_overflow_test),
+ KUNIT_CASE(s32_overflow_test),
+#if BITS_PER_LONG == 64
+ KUNIT_CASE(u64_overflow_test),
+ KUNIT_CASE(s64_overflow_test),
+#endif
+ KUNIT_CASE(overflow_shift_test),
+ KUNIT_CASE(overflow_allocation_test),
+ KUNIT_CASE(overflow_size_helpers_test),
+ {}
+};
- return err;
-}
+static struct kunit_suite overflow_test_suite = {
+ .name = "overflow",
+ .test_cases = overflow_test_cases,
+};
-static void __exit test_module_exit(void)
-{ }
+kunit_test_suite(overflow_test_suite);
-module_init(test_module_init);
-module_exit(test_module_exit);
MODULE_LICENSE("Dual MIT/GPL");
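
Note: two helper conventions are worth keeping apart when reading these tests. check_<op>_overflow() stores the possibly-wrapped result and returns true on overflow, while size_add()/size_mul()/size_sub()/struct_size() saturate to SIZE_MAX so that a subsequent allocator call fails cleanly. A typical caller of the saturating form (struct and function names are illustrative):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct pkt {
		size_t count;
		u32 data[];
	};

	static struct pkt *pkt_alloc(size_t n)
	{
		struct pkt *p;

		/* struct_size() == sizeof(*p) + n * sizeof(p->data[0]),
		 * saturating to SIZE_MAX on overflow, so kzalloc() simply
		 * returns NULL instead of creating an undersized object. */
		p = kzalloc(struct_size(p, data, n), GFP_KERNEL);
		if (p)
			p->count = n;
		return p;
	}
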
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index a4c7cd74cff5..4fb7700a741b 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -4,6 +4,8 @@
# from userspace.
#
+pound := \#
+
CC = gcc
OPTFLAGS = -O2 # Adjust as desired
CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
@@ -42,7 +44,7 @@ else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
- HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
+ HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
ifeq ($(HAS_ALTIVEC),yes)
CFLAGS += -I../../../arch/powerpc/include
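
Note: the `pound` indirection works around a GNU Make 4.3 behavior change: `#` inside a function invocation no longer introduces a comment, so the old `\#` stopped meaning "literal hash" there and started producing a stray backslash. Assigning `pound := \#` in a plain variable assignment, where the escape is still honored, and expanding `$(pound)` yields a literal `#` on both old and new make.
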
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index a3cf071941ab..841a55242aba 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -19,7 +19,6 @@
#define NDISKS 16 /* Including P and Q */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-struct raid6_calls raid6_call;
char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
index 10475dc423c1..1bfb127fbfe8 100644
--- a/lib/raid6/vpermxor.uc
+++ b/lib/raid6/vpermxor.uc
@@ -24,9 +24,9 @@
#ifdef CONFIG_ALTIVEC
#include <altivec.h>
+#include <asm/ppc-opcode.h>
#ifdef __KERNEL__
#include <asm/cputable.h>
-#include <asm/ppc-opcode.h>
#include <asm/switch_to.h>
#endif
diff --git a/lib/random32.c b/lib/random32.c
index a57a0e18819d..976632003ec6 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -41,7 +41,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-#include <trace/events/random.h>
/**
* prandom_u32_state - seeded pseudo-random number generator.
@@ -387,7 +386,6 @@ u32 prandom_u32(void)
struct siprand_state *state = get_cpu_ptr(&net_rand_state);
u32 res = siprand_u32(state);
- trace_prandom_u32(res);
put_cpu_ptr(&net_rand_state);
return res;
}
@@ -553,9 +551,11 @@ static void prandom_reseed(struct timer_list *unused)
* To avoid worrying about whether it's safe to delay that interrupt
* long enough to seed all CPUs, just schedule an immediate timer event.
*/
-static void prandom_timer_start(struct random_ready_callback *unused)
+static int prandom_timer_start(struct notifier_block *nb,
+ unsigned long action, void *data)
{
mod_timer(&seed_timer, jiffies);
+ return 0;
}
#ifdef CONFIG_RANDOM32_SELFTEST
@@ -619,13 +619,13 @@ core_initcall(prandom32_state_selftest);
*/
static int __init prandom_init_late(void)
{
- static struct random_ready_callback random_ready = {
- .func = prandom_timer_start
+ static struct notifier_block random_ready = {
+ .notifier_call = prandom_timer_start
};
- int ret = add_random_ready_callback(&random_ready);
+ int ret = register_random_ready_notifier(&random_ready);
if (ret == -EALREADY) {
- prandom_timer_start(&random_ready);
+ prandom_timer_start(&random_ready, 0, NULL);
ret = 0;
}
return ret;
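
Note: the prandom reseed hook now follows the notifier-chain contract: the callback takes (notifier_block, action, data) and returns a NOTIFY_* value (0, i.e. NOTIFY_DONE, here). The -EALREADY handling is unchanged in spirit: if the base RNG was already seeded before registration, register_random_ready_notifier() reports that and the callback is simply invoked inline once.
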
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 09d293c30fd2..ae4fd4de9ebe 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -85,7 +85,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
bool alloc_hint)
{
unsigned int bits_per_word;
- unsigned int i;
if (shift < 0)
shift = sbitmap_calculate_shift(depth);
@@ -111,16 +110,12 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
sb->alloc_hint = NULL;
}
- sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+ sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
if (!sb->map) {
free_percpu(sb->alloc_hint);
return -ENOMEM;
}
- for (i = 0; i < sb->map_nr; i++) {
- sb->map[i].depth = min(depth, bits_per_word);
- depth -= sb->map[i].depth;
- }
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -135,11 +130,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
-
- for (i = 0; i < sb->map_nr; i++) {
- sb->map[i].depth = min(depth, bits_per_word);
- depth -= sb->map[i].depth;
- }
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
@@ -184,8 +174,8 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
int nr;
do {
- nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
- !sb->round_robin);
+ nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
+ alloc_hint, !sb->round_robin);
if (nr != -1)
break;
if (!sbitmap_deferred_clear(map))
@@ -257,7 +247,9 @@ static int __sbitmap_get_shallow(struct sbitmap *sb,
for (i = 0; i < sb->map_nr; i++) {
again:
nr = __sbitmap_get_word(&sb->map[index].word,
- min(sb->map[index].depth, shallow_depth),
+ min_t(unsigned int,
+ __map_depth(sb, index),
+ shallow_depth),
SB_NR_TO_BIT(sb, alloc_hint), true);
if (nr != -1) {
nr += index << sb->shift;
@@ -315,11 +307,12 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
+ unsigned int word_depth = __map_depth(sb, i);
if (set)
- weight += bitmap_weight(&word->word, word->depth);
+ weight += bitmap_weight(&word->word, word_depth);
else
- weight += bitmap_weight(&word->cleared, word->depth);
+ weight += bitmap_weight(&word->cleared, word_depth);
}
return weight;
}
@@ -367,7 +360,7 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
for (i = 0; i < sb->map_nr; i++) {
unsigned long word = READ_ONCE(sb->map[i].word);
unsigned long cleared = READ_ONCE(sb->map[i].cleared);
- unsigned int word_bits = READ_ONCE(sb->map[i].depth);
+ unsigned int word_bits = __map_depth(sb, i);
word &= ~cleared;
@@ -531,15 +524,16 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
for (i = 0; i < sb->map_nr; i++) {
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
+ unsigned int map_depth = __map_depth(sb, index);
sbitmap_deferred_clear(map);
- if (map->word == (1UL << (map->depth - 1)) - 1)
+ if (map->word == (1UL << (map_depth - 1)) - 1)
continue;
- nr = find_first_zero_bit(&map->word, map->depth);
- if (nr + nr_tags <= map->depth) {
+ nr = find_first_zero_bit(&map->word, map_depth);
+ if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
- int map_tags = min_t(int, nr_tags, map->depth);
+ int map_tags = min_t(int, nr_tags, map_depth);
unsigned long val, ret;
get_mask = ((1UL << map_tags) - 1) << nr;
@@ -563,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
return 0;
}
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth)
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
{
WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
-EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
unsigned int min_shallow_depth)
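
Note: dropping the per-word ->depth field shrinks struct sbitmap_word and removes the loops that had to recompute it on resize; word depth is now derived on demand. The helper all of these hunks lean on is expected to look like (a sketch; the real one lives in include/linux/sbitmap.h):

	static inline unsigned int __map_depth(const struct sbitmap *sb,
					       int index)
	{
		/* Only the last word can be partially used. */
		if (index == sb->map_nr - 1)
			return sb->depth - (index << sb->shift);
		return 1U << sb->shift;
	}
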
diff --git a/lib/sort.c b/lib/sort.c
index aa18153864d2..b399bf10d675 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -122,16 +122,27 @@ static void swap_bytes(void *a, void *b, size_t n)
* a pointer, but small integers make for the smallest compare
* instructions.
*/
-#define SWAP_WORDS_64 (swap_func_t)0
-#define SWAP_WORDS_32 (swap_func_t)1
-#define SWAP_BYTES (swap_func_t)2
+#define SWAP_WORDS_64 (swap_r_func_t)0
+#define SWAP_WORDS_32 (swap_r_func_t)1
+#define SWAP_BYTES (swap_r_func_t)2
+#define SWAP_WRAPPER (swap_r_func_t)3
+
+struct wrapper {
+ cmp_func_t cmp;
+ swap_func_t swap;
+};
/*
* The function pointer is last to make tail calls most efficient if the
* compiler decides not to inline this function.
*/
-static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
+static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
+ if (swap_func == SWAP_WRAPPER) {
+ ((const struct wrapper *)priv)->swap(a, b, (int)size);
+ return;
+ }
+
if (swap_func == SWAP_WORDS_64)
swap_words_64(a, b, size);
else if (swap_func == SWAP_WORDS_32)
@@ -139,7 +150,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
else if (swap_func == SWAP_BYTES)
swap_bytes(a, b, size);
else
- swap_func(a, b, (int)size);
+ swap_func(a, b, (int)size, priv);
}
#define _CMP_WRAPPER ((cmp_r_func_t)0L)
@@ -147,7 +158,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
if (cmp == _CMP_WRAPPER)
- return ((cmp_func_t)(priv))(a, b);
+ return ((const struct wrapper *)priv)->cmp(a, b);
return cmp(a, b, priv);
}
@@ -198,7 +209,7 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
*/
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
- swap_func_t swap_func,
+ swap_r_func_t swap_func,
const void *priv)
{
/* pre-scale counters for performance */
@@ -208,6 +219,10 @@ void sort_r(void *base, size_t num, size_t size,
if (!a) /* num < 2 || size == 0 */
return;
+	/* called from 'sort' without a swap function; pick the default */
+ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
+ swap_func = NULL;
+
if (!swap_func) {
if (is_aligned(base, size, 8))
swap_func = SWAP_WORDS_64;
@@ -230,7 +245,7 @@ void sort_r(void *base, size_t num, size_t size,
if (a) /* Building heap: sift down --a */
a -= size;
else if (n -= size) /* Sorting: Extract root to --n */
- do_swap(base, base + n, size, swap_func);
+ do_swap(base, base + n, size, swap_func, priv);
else /* Sort complete */
break;
@@ -257,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size,
c = b; /* Where "a" belongs */
while (b != a) { /* Shift it into place */
b = parent(b, lsbit, size);
- do_swap(base + b, base + c, size, swap_func);
+ do_swap(base + b, base + c, size, swap_func, priv);
}
}
}
@@ -267,6 +282,11 @@ void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
{
- return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
EXPORT_SYMBOL(sort);
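
Note: sort() keeps its public signature; the new struct wrapper just smuggles the caller's cmp/swap pair through sort_r()'s priv argument, so existing callers are unaffected. An illustrative caller (names are examples, not from this patch):

	static int cmp_u32(const void *a, const void *b)
	{
		u32 x = *(const u32 *)a, y = *(const u32 *)b;

		return x < y ? -1 : x > y;
	}

	static void sort_example(void)
	{
		static u32 vals[] = { 3, 1, 2 };

		/* NULL swap_func: sort_r() sees the NULL .swap in the
		 * wrapper and falls back to the built-in swappers. */
		sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_u32, NULL);
	}
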
diff --git a/lib/test_stackinit.c b/lib/stackinit_kunit.c
index a3c74e6a21ff..35c69aa425b2 100644
--- a/lib/test_stackinit.c
+++ b/lib/stackinit_kunit.c
@@ -2,76 +2,21 @@
/*
* Test cases for compiler-based stack variable zeroing via
* -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * For example, see:
+ * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
+ * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
+ * --make_option LLVM=1 \
+ * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
*
- * External build example:
- * clang -O2 -Wall -ftrivial-auto-var-init=pattern \
- * -o test_stackinit test_stackinit.c
*/
-#ifdef __KERNEL__
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/test.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#else
-
-/* Userspace headers. */
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <stdbool.h>
-#include <errno.h>
-#include <sys/types.h>
-
-/* Linux kernel-ism stubs for stand-alone userspace build. */
-#define KBUILD_MODNAME "stackinit"
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
-#define __init /**/
-#define __exit /**/
-#define __user /**/
-#define noinline __attribute__((__noinline__))
-#define __aligned(x) __attribute__((__aligned__(x)))
-#ifdef __clang__
-# define __compiletime_error(message) /**/
-#else
-# define __compiletime_error(message) __attribute__((__error__(message)))
-#endif
-#define __compiletime_assert(condition, msg, prefix, suffix) \
- do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (!(condition)) \
- prefix ## suffix(); \
- } while (0)
-#define _compiletime_assert(condition, msg, prefix, suffix) \
- __compiletime_assert(condition, msg, prefix, suffix)
-#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
-#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
-#define BUILD_BUG_ON(condition) \
- BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-
-#define module_init(func) static int (*do_init)(void) = func
-#define module_exit(func) static void (*do_exit)(void) = func
-#define MODULE_LICENSE(str) int main(void) { \
- int rc; \
- /* License: str */ \
- rc = do_init(); \
- if (rc == 0) \
- do_exit(); \
- return rc; \
- }
-
-#endif /* __KERNEL__ */
-
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
static u8 check_buf[MAX_VAR_SIZE];
@@ -201,7 +146,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
*/
#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Returns 0 on success, 1 on failure. */ \
-static noinline __init int test_ ## name (void) \
+static noinline void test_ ## name (struct kunit *test) \
{ \
var_type zero INIT_CLONE_ ## which; \
int ignored; \
@@ -220,10 +165,8 @@ static noinline __init int test_ ## name (void) \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
- if (sum) { \
- pr_err(#name ": leaf fill was not 0xFF!?\n"); \
- return 1; \
- } \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -231,32 +174,29 @@ static noinline __init int test_ ## name (void) \
FETCH_ARG_ ## which(zero)); \
\
/* Validate that compiler lined up fill and target. */ \
- if (!range_contains(fill_start, fill_size, \
- target_start, target_size)) { \
- pr_err(#name ": stack fill missed target!?\n"); \
- pr_err(#name ": fill %zu wide\n", fill_size); \
- pr_err(#name ": target offset by %d\n", \
- (int)((ssize_t)(uintptr_t)fill_start - \
- (ssize_t)(uintptr_t)target_start)); \
- return 1; \
- } \
+ KUNIT_ASSERT_TRUE_MSG(test, \
+ range_contains(fill_start, fill_size, \
+ target_start, target_size), \
+ "stack fill missed target!? " \
+ "(fill %zu wide, target offset by %d)\n", \
+ fill_size, \
+ (int)((ssize_t)(uintptr_t)fill_start - \
+ (ssize_t)(uintptr_t)target_start)); \
\
/* Look for any bytes still 0xFF in check region. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
- if (sum == 0) { \
- pr_info(#name " ok\n"); \
- return 0; \
- } else { \
- pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \
- (xfail) ? "X" : "", sum); \
- return (xfail) ? 0 : 1; \
- } \
+ if (sum != 0 && xfail) \
+ kunit_skip(test, \
+ "XFAIL uninit bytes: %d\n", \
+ sum); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "uninit bytes: %d\n", sum); \
}
#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
-static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
+static noinline DO_NOTHING_TYPE_ ## which(var_type) \
do_nothing_ ## name(var_type *ptr) \
{ \
/* Will always be true, but compiler doesn't know. */ \
@@ -265,9 +205,8 @@ do_nothing_ ## name(var_type *ptr) \
else \
return DO_NOTHING_RETURN_ ## which(ptr + 1); \
} \
-static noinline __init int leaf_ ## name(unsigned long sp, \
- bool fill, \
- var_type *arg) \
+static noinline int leaf_ ## name(unsigned long sp, bool fill, \
+ var_type *arg) \
{ \
char buf[VAR_BUFFER]; \
var_type var \
@@ -341,6 +280,27 @@ struct test_user {
unsigned long four;
};
+#define ALWAYS_PASS WANT_SUCCESS
+#define ALWAYS_FAIL XFAIL
+
+#ifdef CONFIG_INIT_STACK_NONE
+# define USER_PASS XFAIL
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS XFAIL
+#else
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS WANT_SUCCESS
+#endif
+
#define DEFINE_SCALAR_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, name, SCALAR, \
init, xfail)
@@ -364,27 +324,26 @@ struct test_user {
DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
DEFINE_STRUCT_TEST(packed, init, xfail)
-#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
DEFINE_STRUCT_TESTS(base ## _ ## partial, \
- WANT_SUCCESS); \
- DEFINE_STRUCT_TESTS(base ## _ ## all, \
- WANT_SUCCESS)
+ xfail); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
/* These should be fully initialized all the time! */
-DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS);
-DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS);
+DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
-DEFINE_STRUCT_INITIALIZER_TESTS(static);
-DEFINE_STRUCT_INITIALIZER_TESTS(dynamic);
-DEFINE_STRUCT_INITIALIZER_TESTS(runtime);
-DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static);
-DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic);
-DEFINE_STRUCT_TESTS(assigned_copy, XFAIL);
+DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
-DEFINE_SCALAR_TESTS(none, WANT_SUCCESS);
-DEFINE_STRUCT_TESTS(none, WANT_SUCCESS);
+DEFINE_SCALAR_TESTS(none, STRONG_PASS);
+DEFINE_STRUCT_TESTS(none, BYREF_PASS);
/* Initialization of members with __user attribute. */
-DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS);
+DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
/*
* Check two uses through a variable declaration outside either path,
@@ -398,7 +357,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
* This is intentionally unreachable. To silence the
* warning, build with -Wno-switch-unreachable
*/
- uint64_t var;
+ uint64_t var[10];
case 1:
target_start = &var;
@@ -423,19 +382,19 @@ static int noinline __leaf_switch_none(int path, bool fill)
memcpy(check_buf, target_start, target_size);
break;
default:
- var = 5;
- return var & forced_mask;
+ var[1] = 5;
+ return var[1] & forced_mask;
}
return 0;
}
-static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill,
+static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(1, fill);
}
-static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
+static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(2, fill);
@@ -447,68 +406,56 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
* non-code areas (i.e. in a switch statement before the first "case").
* https://bugs.llvm.org/show_bug.cgi?id=44916
*/
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL);
-
-static int __init test_stackinit_init(void)
-{
- unsigned int failures = 0;
-
-#define test_scalars(init) do { \
- failures += test_u8_ ## init (); \
- failures += test_u16_ ## init (); \
- failures += test_u32_ ## init (); \
- failures += test_u64_ ## init (); \
- failures += test_char_array_ ## init (); \
- } while (0)
-
-#define test_structs(init) do { \
- failures += test_small_hole_ ## init (); \
- failures += test_big_hole_ ## init (); \
- failures += test_trailing_hole_ ## init (); \
- failures += test_packed_ ## init (); \
- } while (0)
-
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
+
+#define KUNIT_test_scalars(init) \
+ KUNIT_CASE(test_u8_ ## init), \
+ KUNIT_CASE(test_u16_ ## init), \
+ KUNIT_CASE(test_u32_ ## init), \
+ KUNIT_CASE(test_u64_ ## init), \
+ KUNIT_CASE(test_char_array_ ## init)
+
+#define KUNIT_test_structs(init) \
+ KUNIT_CASE(test_small_hole_ ## init), \
+ KUNIT_CASE(test_big_hole_ ## init), \
+ KUNIT_CASE(test_trailing_hole_ ## init),\
+ KUNIT_CASE(test_packed_ ## init) \
+
+static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
- test_scalars(zero);
- test_structs(zero);
+ KUNIT_test_scalars(zero),
+ KUNIT_test_structs(zero),
/* Padding here appears to be accidentally always initialized? */
- test_structs(dynamic_partial);
- test_structs(assigned_dynamic_partial);
+ KUNIT_test_structs(dynamic_partial),
+ KUNIT_test_structs(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
- test_structs(static_partial);
- test_structs(static_all);
- test_structs(dynamic_all);
- test_structs(runtime_partial);
- test_structs(runtime_all);
- test_structs(assigned_static_partial);
- test_structs(assigned_static_all);
- test_structs(assigned_dynamic_all);
+ KUNIT_test_structs(static_partial),
+ KUNIT_test_structs(static_all),
+ KUNIT_test_structs(dynamic_all),
+ KUNIT_test_structs(runtime_partial),
+ KUNIT_test_structs(runtime_all),
+ KUNIT_test_structs(assigned_static_partial),
+ KUNIT_test_structs(assigned_static_all),
+ KUNIT_test_structs(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
- test_structs(assigned_copy);
-
+ KUNIT_test_structs(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
- test_scalars(none);
- failures += test_switch_1_none();
- failures += test_switch_2_none();
-
+ KUNIT_test_scalars(none),
+ KUNIT_CASE(test_switch_1_none),
+ KUNIT_CASE(test_switch_2_none),
/* STRUCTLEAK_BYREF should cover from here down. */
- test_structs(none);
-
+ KUNIT_test_structs(none),
/* STRUCTLEAK will only cover this. */
- failures += test_user();
-
- if (failures == 0)
- pr_info("all tests passed!\n");
- else
- pr_err("failures: %u\n", failures);
+ KUNIT_CASE(test_user),
+ {}
+};
- return failures ? -EINVAL : 0;
-}
-module_init(test_stackinit_init);
+static struct kunit_suite stackinit_test_suite = {
+ .name = "stackinit",
+ .test_cases = stackinit_test_cases,
+};
-static void __exit test_stackinit_exit(void)
-{ }
-module_exit(test_stackinit_exit);
+kunit_test_suites(&stackinit_test_suite);
MODULE_LICENSE("GPL");
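
Note: the conversion's main behavioral change is how expected failures are reported. Under configurations where a case cannot pass (selected by the USER_PASS/BYREF_PASS/STRONG_PASS ladder above), a nonzero uninitialized-byte count now lands in kunit_skip() rather than in a failure, so the one suite runs cleanly under every CONFIG_INIT_STACK_* and STRUCTLEAK combination while still failing loudly when a case that should be covered regresses.
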
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 90f9f1b7afec..4f877e9551d5 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -968,6 +968,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
EXPORT_SYMBOL(memcpy_and_pad);
#ifdef CONFIG_FORTIFY_SOURCE
+/* These are placeholders for fortify compile-time warnings. */
+void __read_overflow2_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__read_overflow2_field);
+void __write_overflow_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__write_overflow_field);
+
void fortify_panic(const char *name)
{
pr_emerg("detected buffer overflow in %s\n", name);
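
Note: the two empty functions give the fortify field-overflow hooks real symbols; the diagnostics themselves are compile-time warnings wired up in the fortified string helpers and exercised by the new lib/test_fortify/*.c sources in the diffstat. A sketch of the class of bug being flagged (struct and function names are illustrative):

	#include <linux/string.h>

	struct two_fields {
		char a[8];
		char b[8];
	};

	static void bad_copy(struct two_fields *p, const void *src)
	{
		/* Constant size larger than the destination member:
		 * under CONFIG_FORTIFY_SOURCE this resolves to
		 * __write_overflow_field() and warns at build time. */
		memcpy(p->a, src, 16);
	}
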
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 122d8d0e253c..08fc72d3ed16 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,7 +120,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
- max_addr = user_addr_max();
+ max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 1616710b8a82..bffa0ebf9f8b 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,7 +96,7 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
- max_addr = user_addr_max();
+ max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0c5cb2d6436a..2a7836e115b4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -53,6 +53,7 @@
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = {
},
/* BPF_LDX_MEM B/H/W/DW */
{
- "BPF_LDX_MEM | BPF_B",
+ "BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
@@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_H",
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
@@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_W",
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
@@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
.stack_depth = 8,
},
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)
@@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = {
.result = 10,
},
{
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
+ {
"Tail call error path, max count reached",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
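For the FLAG_LARGE_MEM cases added earlier in this diff, the first member
of the result tuple doubles as the size of a plain kmalloc() buffer rather
than an skb length (see the generate_test_data() hunk above). Condensed
shape of one such case, with the fields annotated; this restates the
entries above rather than adding a new test:

	{
		"BPF_LDX_MEM | BPF_B, negative offset",
		.u.insns_int = { /* store, then load back at R1 - 256 */ },
		INTERNAL | FLAG_LARGE_MEM,
		{ },			/* no classic-BPF test data */
		{ { 512, 0 } },		/* 512-byte buffer, expect R0 == 0 */
		.stack_depth = 0,
	},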
diff --git a/lib/test_fortify/read_overflow2_field-memcpy.c b/lib/test_fortify/read_overflow2_field-memcpy.c
new file mode 100644
index 000000000000..de9569266223
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2_field-memmove.c b/lib/test_fortify/read_overflow2_field-memmove.c
new file mode 100644
index 000000000000..6cc2724c8f62
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memcpy.c b/lib/test_fortify/write_overflow_field-memcpy.c
new file mode 100644
index 000000000000..28cc81058dd3
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memmove.c b/lib/test_fortify/write_overflow_field-memmove.c
new file mode 100644
index 000000000000..377fcf9bb2fd
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memset.c b/lib/test_fortify/write_overflow_field-memset.c
new file mode 100644
index 000000000000..2331da26909e
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memset.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memset(instance.buf, 0x42, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
new file mode 100644
index 000000000000..ed70637a2ffa
--- /dev/null
+++ b/lib/test_fprobe.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_fprobe.c - simple sanity test for fprobe
+ */
+
+#include <linux/kernel.h>
+#include <linux/fprobe.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static struct kunit *current_test;
+
+static u32 rand1, entry_val, exit_val;
+
+/* Use indirect calls to avoid inlining the target functions */
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
+static unsigned long target_ip;
+static unsigned long target2_ip;
+
+static noinline u32 fprobe_selftest_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 fprobe_selftest_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ /* This handler can be called for both fprobe_selftest_target and fprobe_selftest_target2 */
+ if (ip != target_ip)
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ entry_val = (rand1 / div_factor);
+}
+
+static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ if (ip != target_ip) {
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ } else
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
+ KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
+ exit_val = entry_val + div_factor;
+}
+
+/* Test entry only (no rethook) */
+static void test_fprobe_entry(struct kunit *test)
+{
+ struct fprobe fp_entry = {
+ .entry_handler = fp_entry_handler,
+ };
+
+ current_test = test;
+
+ /* Unregistering before registering should fail. */
+ KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
+}
+
+static void test_fprobe(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_syms(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static unsigned long get_ftrace_location(void *func)
+{
+ unsigned long size, addr = (unsigned long)func;
+
+ if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
+ return 0;
+
+ return ftrace_location_range(addr, addr + size - 1);
+}
+
+static int fprobe_test_init(struct kunit *test)
+{
+ do {
+ rand1 = prandom_u32();
+ } while (rand1 <= div_factor);
+
+ target = fprobe_selftest_target;
+ target2 = fprobe_selftest_target2;
+ target_ip = get_ftrace_location(target);
+ target2_ip = get_ftrace_location(target2);
+
+ return 0;
+}
+
+static struct kunit_case fprobe_testcases[] = {
+ KUNIT_CASE(test_fprobe_entry),
+ KUNIT_CASE(test_fprobe),
+ KUNIT_CASE(test_fprobe_syms),
+ {}
+};
+
+static struct kunit_suite fprobe_test_suite = {
+ .name = "fprobe_test",
+ .init = fprobe_test_init,
+ .test_cases = fprobe_testcases,
+};
+
+kunit_test_suites(&fprobe_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 767538089a62..cfe632047839 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
@@ -26,6 +27,8 @@
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
#include "test_hmm_uapi.h"
@@ -563,7 +566,6 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
}
dpage->zone_device_data = rpage;
- get_page(dpage);
lock_page(dpage);
return dpage;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 26a5c9007653..ad880231dfa8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
#include <asm/page.h>
@@ -36,7 +37,7 @@ void *kasan_ptr_result;
int kasan_int_result;
static struct kunit_resource resource;
-static struct kunit_kasan_expectation fail_data;
+static struct kunit_kasan_status test_status;
static bool multishot;
/*
@@ -53,58 +54,63 @@ static int kasan_test_init(struct kunit *test)
}
multishot = kasan_save_enable_multi_shot();
- fail_data.report_found = false;
+ test_status.report_found = false;
+ test_status.sync_fault = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
- "kasan_data", &fail_data);
+ "kasan_status", &test_status);
return 0;
}
static void kasan_test_exit(struct kunit *test)
{
kasan_restore_multi_shot(multishot);
- KUNIT_EXPECT_FALSE(test, fail_data.report_found);
+ KUNIT_EXPECT_FALSE(test, test_status.report_found);
}
/**
* KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
* KASAN report; causes a test failure otherwise. This relies on a KUnit
- * resource named "kasan_data". Do not use this name for KUnit resources
+ * resource named "kasan_status". Do not use this name for KUnit resources
* outside of KASAN tests.
*
- * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
+ * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
* checking is auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU,
* this handler disables migration (preemption).
*
- * Since the compiler doesn't see that the expression can change the fail_data
+ * Since the compiler doesn't see that the expression can change the test_status
* fields, it can reorder or optimize away the accesses to those fields.
* Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
* expression to prevent that.
*
- * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
- * false. This allows detecting KASAN reports that happen outside of the checks
- * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
- * and in kasan_test_exit.
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
+ * as false. This allows detecting KASAN reports that happen outside of the
+ * checks by asserting !test_status.report_found at the start of
+ * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
kasan_sync_fault_possible()) \
migrate_disable(); \
- KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
+ KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
barrier(); \
expression; \
barrier(); \
- if (!READ_ONCE(fail_data.report_found)) { \
+ if (kasan_async_fault_possible()) \
+ kasan_force_async_fault(); \
+ if (!READ_ONCE(test_status.report_found)) { \
KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
"expected in \"" #expression \
"\", but none occurred"); \
} \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
- if (READ_ONCE(fail_data.report_found)) \
- kasan_enable_tagging_sync(); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ kasan_sync_fault_possible()) { \
+ if (READ_ONCE(test_status.report_found) && \
+ READ_ONCE(test_status.sync_fault)) \
+ kasan_enable_tagging(); \
migrate_enable(); \
} \
- WRITE_ONCE(fail_data.report_found, false); \
+ WRITE_ONCE(test_status.report_found, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
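Callers of the renamed machinery are unchanged; a representative test,
modelled on the existing kmalloc out-of-bounds cases in this file:

	static void kmalloc_oob_example(struct kunit *test)
	{
		size_t size = 128;
		char *ptr;

		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

		/* A one-byte write past the object must produce a report. */
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
		kfree(ptr);
	}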
@@ -780,7 +786,7 @@ static void ksize_uaf(struct kunit *test)
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
- /* See comment in kasan_global_oob. */
+ /* See comment in kasan_global_oob_right. */
char *volatile array = stack_array;
char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
@@ -793,7 +799,7 @@ static void kasan_alloca_oob_left(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- /* See comment in kasan_global_oob. */
+ /* See comment in kasan_global_oob_right. */
char *volatile array = alloca_array;
char *p = array - 1;
@@ -808,7 +814,7 @@ static void kasan_alloca_oob_right(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- /* See comment in kasan_global_oob. */
+ /* See comment in kasan_global_oob_right. */
char *volatile array = alloca_array;
char *p = array + i;
@@ -869,11 +875,14 @@ static void kmem_cache_invalid_free(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void empty_cache_ctor(void *object) { }
+
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
- cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
+ /* Provide a constructor to prevent cache merging. */
+ cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
@@ -1054,21 +1063,186 @@ static void kmalloc_double_kzfree(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
+static void vmalloc_helpers_tags(struct kunit *test)
+{
+ void *ptr;
+
+ /* This test is intended for tag-based modes. */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+
+ ptr = vmalloc(PAGE_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ /* Check that the returned pointer is tagged. */
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* Make sure exported vmalloc helpers handle tagged pointers. */
+ KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
+
+#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
+ {
+ int rv;
+
+ /* Make sure vmalloc'ed memory permissions can be changed. */
+ rv = set_memory_ro((unsigned long)ptr, 1);
+ KUNIT_ASSERT_GE(test, rv, 0);
+ rv = set_memory_rw((unsigned long)ptr, 1);
+ KUNIT_ASSERT_GE(test, rv, 0);
+ }
+#endif
+
+ vfree(ptr);
+}
+
static void vmalloc_oob(struct kunit *test)
{
- void *area;
+ char *v_ptr, *p_ptr;
+ struct page *page;
+ size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ v_ptr = vmalloc(size);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+ OPTIMIZER_HIDE_VAR(v_ptr);
+
/*
- * We have to be careful not to hit the guard page.
+ * We have to be careful not to hit the guard page in vmalloc tests.
* The MMU will catch that and crash us.
*/
- area = vmalloc(3000);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
- vfree(area);
+ /* Make sure in-bounds accesses are valid. */
+ v_ptr[0] = 0;
+ v_ptr[size - 1] = 0;
+
+ /*
+ * An unaligned access past the requested vmalloc size.
+ * Only generic KASAN can precisely detect these.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
+
+ /* An aligned access into the first out-of-bounds granule. */
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
+
+ /* Check that in-bounds accesses to the physical page are valid. */
+ page = vmalloc_to_page(v_ptr);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
+ p_ptr = page_address(page);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+ p_ptr[0] = 0;
+
+ vfree(v_ptr);
+
+ /*
+ * We can't check for use-after-unmap bugs in this nor in the following
+ * vmalloc tests, as the page might be fully unmapped and accessing it
+ * will crash the kernel.
+ */
+}
+
+static void vmap_tags(struct kunit *test)
+{
+ char *p_ptr, *v_ptr;
+ struct page *p_page, *v_page;
+
+ /*
+ * This test is specifically crafted for the software tag-based mode,
+ * the only tag-based mode that poisons vmap mappings.
+ */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+
+ p_page = alloc_pages(GFP_KERNEL, 1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
+ p_ptr = page_address(p_page);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+
+ v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+ /*
+ * We can't check for out-of-bounds bugs in this nor in the following
+ * vmalloc tests, as allocations have page granularity and accessing
+ * the guard page will crash the kernel.
+ */
+
+ KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* Make sure that in-bounds accesses through both pointers work. */
+ *p_ptr = 0;
+ *v_ptr = 0;
+
+ /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
+ v_page = vmalloc_to_page(v_ptr);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
+ KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
+
+ vunmap(v_ptr);
+ free_pages((unsigned long)p_ptr, 1);
+}
+
+static void vm_map_ram_tags(struct kunit *test)
+{
+ char *p_ptr, *v_ptr;
+ struct page *page;
+
+ /*
+ * This test is specifically crafted for the software tag-based mode,
+ * the only tag-based mode that poisons vm_map_ram mappings.
+ */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+ page = alloc_pages(GFP_KERNEL, 1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
+ p_ptr = page_address(page);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+
+ v_ptr = vm_map_ram(&page, 1, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+ KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* Make sure that in-bounds accesses through both pointers work. */
+ *p_ptr = 0;
+ *v_ptr = 0;
+
+ vm_unmap_ram(v_ptr, 1);
+ free_pages((unsigned long)p_ptr, 1);
+}
+
+static void vmalloc_percpu(struct kunit *test)
+{
+ char __percpu *ptr;
+ int cpu;
+
+ /*
+ * This test is specifically crafted for the software tag-based mode,
+ * the only tag-based mode that poisons percpu mappings.
+ */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+ ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+
+ for_each_possible_cpu(cpu) {
+ char *c_ptr = per_cpu_ptr(ptr, cpu);
+
+ KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* Make sure that in-bounds accesses don't crash the kernel. */
+ *c_ptr = 0;
+ }
+
+ free_percpu(ptr);
}
/*
@@ -1102,6 +1276,18 @@ static void match_all_not_assigned(struct kunit *test)
KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
free_pages((unsigned long)ptr, order);
}
+
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ return;
+
+ for (i = 0; i < 256; i++) {
+ size = (get_random_int() % 1024) + 1;
+ ptr = vmalloc(size);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ vfree(ptr);
+ }
}
/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
@@ -1207,7 +1393,11 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
+ KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
+ KUNIT_CASE(vmap_tags),
+ KUNIT_CASE(vm_map_ram_tags),
+ KUNIT_CASE(vmalloc_percpu),
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
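These cases are gated by CONFIG_KASAN_KUNIT_TEST. One plausible
.kunitconfig fragment for running the generic-mode suite, assuming an
architecture with KASAN support:

	CONFIG_KUNIT=y
	CONFIG_KASAN=y
	CONFIG_KASAN_GENERIC=y
	CONFIG_KASAN_VMALLOC=y
	CONFIG_KASAN_KUNIT_TEST=y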
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index ce1589391413..cb800b1d0d99 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1149,6 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
if (ret) {
pr_err("could not register misc device: %d\n", ret);
free_test_dev_kmod(test_dev);
+ test_dev = NULL;
goto out;
}
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 906b598740a7..c3fd87d6c2dd 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -417,9 +417,14 @@ static bool test_kernel_ptr(unsigned long addr, int size)
return false;
/* should be at least readable kernel address */
- if (access_ok(ptr, 1) ||
- access_ok(ptr + size - 1, 1) ||
- get_kernel_nofault(buf, ptr) ||
+ if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) &&
+ (access_ok((void __user *)ptr, 1) ||
+ access_ok((void __user *)ptr + size - 1, 1))) {
+ pr_err("user space ptr invalid in kernel: %#lx\n", addr);
+ return true;
+ }
+
+ if (get_kernel_nofault(buf, ptr) ||
get_kernel_nofault(buf, ptr + size - 1)) {
pr_err("invalid kernel ptr: %#lx\n", addr);
return true;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8b1c318189ce..e77d4856442c 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1463,6 +1463,25 @@ unlock:
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_create_range_5(struct xarray *xa,
+ unsigned long index, unsigned int order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ for (i = 0; i < order + 10; i++) {
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xa_destroy(xa);
+}
+
static noinline void check_create_range(struct xarray *xa)
{
unsigned int order;
@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
check_create_range_4(xa, (3U << order) + 1, order);
check_create_range_4(xa, (3U << order) - 1, order);
check_create_range_4(xa, (1U << 24) + 1, order);
+
+ check_create_range_5(xa, 0, order);
+ check_create_range_5(xa, (1U << order), order);
}
check_create_range_3();
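check_create_range_5() exercises the canonical XArray retry idiom: do the
work under the lock, and if the operation ran out of preallocated nodes,
let xas_nomem() allocate outside the lock and retry. In its general form:

	XA_STATE(xas, &array, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));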
diff --git a/lib/ubsan.c b/lib/ubsan.c
index bdc380ff5d5c..36bd75e33426 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -154,16 +154,8 @@ static void ubsan_epilogue(void)
current->in_ubsan--;
- if (panic_on_warn) {
- /*
- * This thread may hit another WARN() in the panic path.
- * Resetting this prevents additional WARN() from panicking the
- * system on this thread. Other threads are blocked by the
- * panic_mutex in panic().
- */
- panic_on_warn = 0;
+ if (panic_on_warn)
panic("panic_on_warn set ...\n");
- }
}
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3b8129dd374c..40d26a07a133 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -49,10 +49,15 @@
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/byteorder.h> /* cpu_to_le16 */
+#include <asm/unaligned.h>
#include <linux/string_helpers.h>
#include "kstrtox.h"
+/* Disable pointer hashing if requested */
+bool no_hash_pointers __ro_after_init;
+EXPORT_SYMBOL_GPL(no_hash_pointers);
+
static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
{
const char *cp;
@@ -757,14 +762,16 @@ static void enable_ptr_key_workfn(struct work_struct *work)
static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static int fill_random_ptr_key(struct notifier_block *nb,
+ unsigned long action, void *data)
{
/* This may be in an interrupt handler. */
queue_work(system_unbound_wq, &enable_ptr_key_work);
+ return 0;
}
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
+static struct notifier_block random_ready = {
+ .notifier_call = fill_random_ptr_key
};
static int __init initialize_ptr_random(void)
@@ -778,7 +785,7 @@ static int __init initialize_ptr_random(void)
return 0;
}
- ret = add_random_ready_callback(&random_ready);
+ ret = register_random_ready_notifier(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {
@@ -848,6 +855,19 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
return pointer_string(buf, end, (const void *)hashval, spec);
}
+static char *default_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ /*
+ * default is to _not_ leak addresses, so hash before printing,
+ * unless no_hash_pointers is specified on the command line.
+ */
+ if (unlikely(no_hash_pointers))
+ return pointer_string(buf, end, ptr, spec);
+
+ return ptr_to_id(buf, end, ptr, spec);
+}
+
int kptr_restrict __read_mostly;
static noinline_for_stack
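The observable behaviour of default_pointer() is what %p users already
get: a per-boot hashed value unless the no_hash_pointers boot parameter is
set, with %px as the explicit unhashed opt-out. For example:

	pr_info("hashed: %p\n", ptr);	/* hashed, stable within one boot */
	pr_info("raw:    %px\n", ptr);	/* real address; debugging only */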
@@ -857,7 +877,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
switch (kptr_restrict) {
case 0:
/* Handle as %p, hash and do _not_ leak addresses. */
- return ptr_to_id(buf, end, ptr, spec);
+ return default_pointer(buf, end, ptr, spec);
case 1: {
const struct cred *cred;
@@ -1761,7 +1781,7 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
char output[sizeof("0123 little-endian (0x01234567)")];
char *p = output;
unsigned int i;
- u32 val;
+ u32 orig, val;
if (fmt[1] != 'c' || fmt[2] != 'c')
return error_string(buf, end, "(%p4?)", spec);
@@ -1769,21 +1789,23 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
if (check_pointer(&buf, end, fourcc, spec))
return buf;
- val = *fourcc & ~BIT(31);
+ orig = get_unaligned(fourcc);
+ val = orig & ~BIT(31);
- for (i = 0; i < sizeof(*fourcc); i++) {
+ for (i = 0; i < sizeof(u32); i++) {
unsigned char c = val >> (i * 8);
/* Print non-control ASCII characters as-is, dot otherwise */
*p++ = isascii(c) && isprint(c) ? c : '.';
}
- strcpy(p, *fourcc & BIT(31) ? " big-endian" : " little-endian");
+ *p++ = ' ';
+ strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
p += strlen(p);
*p++ = ' ';
*p++ = '(';
- p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32));
+ p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
*p++ = ')';
*p = '\0';
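With the switch to get_unaligned(), %p4cc also handles fourcc codes stored
at unaligned addresses (e.g. in packed structures). Illustrative output:

	u32 fourcc = 0x34325258;	/* fourcc_code('X','R','2','4') */

	pr_info("format: %p4cc\n", &fourcc);
	/* -> "format: XR24 little-endian (0x34325258)" */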
@@ -2223,10 +2245,6 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
-/* Disable pointer hashing if requested */
-bool no_hash_pointers __ro_after_init;
-EXPORT_SYMBOL_GPL(no_hash_pointers);
-
int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
@@ -2455,7 +2473,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'e':
/* %pe with a non-ERR_PTR gets treated as plain %p */
if (!IS_ERR(ptr))
- break;
+ return default_pointer(buf, end, ptr, spec);
return err_ptr(buf, end, ptr, spec);
case 'u':
case 'k':
@@ -2465,16 +2483,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
default:
return error_string(buf, end, "(einval)", spec);
}
+ default:
+ return default_pointer(buf, end, ptr, spec);
}
-
- /*
- * default is to _not_ leak addresses, so hash before printing,
- * unless no_hash_pointers is specified on the command line.
- */
- if (unlikely(no_hash_pointers))
- return pointer_string(buf, end, ptr, spec);
- else
- return ptr_to_id(buf, end, ptr, spec);
}
/*
@@ -2895,13 +2906,15 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
+ if (unlikely(!size))
+ return 0;
+
i = vsnprintf(buf, size, fmt, args);
if (likely(i < size))
return i;
- if (size != 0)
- return size - 1;
- return 0;
+
+ return size - 1;
}
EXPORT_SYMBOL(vscnprintf);
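The rewrite keeps the contract callers depend on: unlike vsnprintf(), the
return value is the number of characters actually written to buf
(excluding the trailing '\0'), and a zero-sized buffer yields 0. Via the
scnprintf() wrapper:

	char buf[8];
	int len;

	len = scnprintf(buf, sizeof(buf), "%s=%d", "x", 123456);
	/* buf == "x=12345", len == 7, never >= sizeof(buf) */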
diff --git a/lib/xarray.c b/lib/xarray.c
index 6f47f6375808..54e646e8e6ee 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
if (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+ entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;
@@ -302,7 +304,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
return false;
xas->xa_alloc->parent = NULL;
@@ -334,10 +336,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
gfp |= __GFP_ACCOUNT;
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
xas_lock_type(xas, lock_type);
} else {
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
}
if (!xas->xa_alloc)
return false;
@@ -371,7 +373,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node) {
xas_set_err(xas, -ENOMEM);
return NULL;
@@ -722,6 +724,8 @@ void xas_create_range(struct xa_state *xas)
for (;;) {
struct xa_node *node = xas->xa_node;
+ if (node->shift >= shift)
+ break;
xas->xa_node = xa_parent_locked(xas->xa, node);
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
@@ -1014,7 +1018,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
void *sibling = NULL;
struct xa_node *node;
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node)
goto nomem;
node->array = xas->xa;
@@ -1079,6 +1083,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
xa_mk_node(child));
if (xa_is_value(curr))
values--;
+ xas_update(xas, child);
} else {
unsigned int canon = offset - xas->xa_sibs;
@@ -1093,6 +1098,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} while (offset-- > xas->xa_offset);
node->nr_values += values;
+ xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif