Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                        27
-rw-r--r--  lib/Kconfig.ubsan                        11
-rw-r--r--  lib/Makefile                              1
-rw-r--r--  lib/alloc_tag.c                          34
-rw-r--r--  lib/crc16.c                               9
-rw-r--r--  lib/crc32.c                              10
-rw-r--r--  lib/crypto/Kconfig                       89
-rw-r--r--  lib/crypto/Makefile                      24
-rw-r--r--  lib/crypto/aescfb.c                       2
-rw-r--r--  lib/crypto/aesgcm.c                       2
-rw-r--r--  lib/crypto/blake2s.c                      2
-rw-r--r--  lib/crypto/chacha.c                      40
-rw-r--r--  lib/crypto/chacha20poly1305-selftest.c    8
-rw-r--r--  lib/crypto/chacha20poly1305.c            55
-rw-r--r--  lib/crypto/curve25519.c                   2
-rw-r--r--  lib/crypto/libchacha.c                    2
-rw-r--r--  lib/crypto/poly1305-generic.c            24
-rw-r--r--  lib/crypto/poly1305.c                    75
-rw-r--r--  lib/crypto/sha256-generic.c             137
-rw-r--r--  lib/crypto/sha256.c                     150
-rw-r--r--  lib/crypto/sm3.c                        185
-rw-r--r--  lib/devres.c                              1
-rw-r--r--  lib/errseq.c                             13
-rw-r--r--  lib/find_bit.c                           11
-rw-r--r--  lib/iov_iter.c                           30
-rw-r--r--  lib/kstrtox.c                             4
-rw-r--r--  lib/kunit/executor.c                      2
-rw-r--r--  lib/kunit/static_stub.c                   2
-rw-r--r--  lib/llist.c                              22
-rw-r--r--  lib/maple_tree.c                        191
-rw-r--r--  lib/oid_registry.c                       25
-rw-r--r--  lib/pldmfw/pldmfw.c                       6
-rw-r--r--  lib/raid6/algos.c                         6
-rw-r--r--  lib/raid6/avx512.c                        4
-rw-r--r--  lib/raid6/recov_avx512.c                  6
-rw-r--r--  lib/raid6/test/Makefile                   3
-rw-r--r--  lib/ratelimit.c                          75
-rw-r--r--  lib/rbtree.c                              8
-rw-r--r--  lib/scatterlist.c                        23
-rw-r--r--  lib/string_helpers.c                     39
-rw-r--r--  lib/test_fortify/Makefile                 5
-rw-r--r--  lib/test_kmod.c                          64
-rw-r--r--  lib/test_sysctl.c                       131
-rw-r--r--  lib/test_vmalloc.c                       22
-rw-r--r--  lib/test_xarray.c                        17
-rw-r--r--  lib/tests/Makefile                        1
-rw-r--r--  lib/tests/crc_kunit.c                     6
-rw-r--r--  lib/tests/overflow_kunit.c                4
-rw-r--r--  lib/tests/printf_kunit.c                 39
-rw-r--r--  lib/tests/randstruct_kunit.c            334
-rw-r--r--  lib/tests/stackinit_kunit.c              10
-rw-r--r--  lib/tests/usercopy_kunit.c                1
-rw-r--r--  lib/ubsan.c                               8
-rw-r--r--  lib/vsprintf.c                           50
-rw-r--r--  lib/xarray.c                              9
55 files changed, 1404 insertions, 657 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f9051ab610d5..ebe33181b6e6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2153,18 +2153,12 @@ config ARCH_HAS_KCOV
build and run with CONFIG_KCOV. This typically requires
disabling instrumentation for some early boot code.
-config CC_HAS_SANCOV_TRACE_PC
- def_bool $(cc-option,-fsanitize-coverage=trace-pc)
-
-
config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
- depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
- select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
help
KCOV exposes kernel code coverage information in a form suitable
@@ -2574,8 +2568,7 @@ config TEST_BITOPS
config TEST_VMALLOC
tristate "Test module for stress/performance analysis of vmalloc allocator"
default n
- depends on MMU
- depends on m
+ depends on MMU
help
This builds the "test_vmalloc" module that should be used for
stress and performance analysis. So, any new change for vmalloc
@@ -2863,6 +2856,14 @@ config OVERFLOW_KUNIT_TEST
If unsure, say N.
+config RANDSTRUCT_KUNIT_TEST
+ tristate "Test randstruct structure layout randomization at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for checking CONFIG_RANDSTRUCT=y, which
+ randomizes structure layouts.
+
config STACKINIT_KUNIT_TEST
tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2870,9 +2871,7 @@ config STACKINIT_KUNIT_TEST
help
Test if the kernel is zero-initializing stack variables and
padding. Coverage is controlled by compiler flags,
- CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
- CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
- or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+ CONFIG_INIT_STACK_ALL_PATTERN or CONFIG_INIT_STACK_ALL_ZERO.
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
@@ -2983,13 +2982,7 @@ config TEST_DYNAMIC_DEBUG
config TEST_KMOD
tristate "kmod stress tester"
depends on m
- depends on NETDEVICES && NET_CORE && INET # for TUN
- depends on BLOCK
- depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
select TEST_LKM
- select XFS_FS
- select TUN
- select BTRFS_FS
help
Test the kernel's module loading mechanism: kmod. kmod implements
support to load modules using the Linux kernel's usermode helper.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index f6ea0c5b5da3..744121178815 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -118,6 +118,8 @@ config UBSAN_UNREACHABLE
config UBSAN_INTEGER_WRAP
bool "Perform checking for integer arithmetic wrap-around"
+ # This is very experimental so drop the next line if you really want it
+ depends on BROKEN
depends on !COMPILE_TEST
depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
@@ -165,4 +167,13 @@ config TEST_UBSAN
This is a test module for UBSAN.
It triggers various kinds of undefined behavior and detects them.
+config UBSAN_KVM_EL2
+ bool "UBSAN for KVM code at EL2"
+ depends on ARM64
+ help
+ Enable UBSAN when running on ARM64 with KVM in a split mode
+ (nvhe/hvhe/protected) for the hypervisor code running in EL2.
+ In this mode, any UBSAN violation in EL2 panics the kernel
+ and prints information similar to UBSAN_TRAP.
+
endif # if UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index f07b24ce1b3f..c38582f187dd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -71,7 +71,6 @@ CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
-CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index c7f602fa7b23..45dae7da70e1 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -244,17 +244,6 @@ static void shutdown_mem_profiling(bool remove_file)
mem_profiling_support = false;
}
-static void __init procfs_init(void)
-{
- if (!mem_profiling_support)
- return;
-
- if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
- pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
- shutdown_mem_profiling(false);
- }
-}
-
void __init alloc_tag_sec_init(void)
{
struct alloc_tag *last_codetag;
@@ -813,19 +802,34 @@ static int __init alloc_tag_init(void)
};
int res;
+ sysctl_init();
+
+ if (!mem_profiling_support) {
+ pr_info("Memory allocation profiling is not supported!\n");
+ return 0;
+ }
+
+ if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
+ pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
+ shutdown_mem_profiling(false);
+ return -ENOMEM;
+ }
+
res = alloc_mod_tags_mem();
- if (res)
+ if (res) {
+ pr_err("Failed to reserve address space for module tags, errno = %d\n", res);
+ shutdown_mem_profiling(true);
return res;
+ }
alloc_tag_cttype = codetag_register_type(&desc);
if (IS_ERR(alloc_tag_cttype)) {
+ pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype));
free_mod_tags_mem();
+ shutdown_mem_profiling(true);
return PTR_ERR(alloc_tag_cttype);
}
- sysctl_init();
- procfs_init();
-
return 0;
}
module_init(alloc_tag_init);
diff --git a/lib/crc16.c b/lib/crc16.c
index 5c3a803c01e0..9c71eda9bf4b 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -8,7 +8,7 @@
#include <linux/crc16.h>
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
-u16 const crc16_table[256] = {
+static const u16 crc16_table[256] = {
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
@@ -42,20 +42,19 @@ u16 const crc16_table[256] = {
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
-EXPORT_SYMBOL(crc16_table);
/**
* crc16 - compute the CRC-16 for the data buffer
* @crc: previous CRC value
- * @buffer: data pointer
+ * @p: data pointer
* @len: number of bytes in the buffer
*
* Returns the updated CRC value.
*/
-u16 crc16(u16 crc, u8 const *buffer, size_t len)
+u16 crc16(u16 crc, const u8 *p, size_t len)
{
while (len--)
- crc = crc16_byte(crc, *buffer++);
+ crc = (crc >> 8) ^ crc16_table[(crc & 0xff) ^ *p++];
return crc;
}
EXPORT_SYMBOL(crc16);
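
For context, this is CRC-16/ARC: the table being made static above is the 256-entry lookup for the bit-reversed polynomial 0xA001 (0x8005 reflected). A minimal userspace sketch (not the kernel code) that regenerates the same table and performs the identical update step crc16() now does inline, in place of the former crc16_byte() helper:

#include <stddef.h>
#include <stdint.h>

static uint16_t table[256];

/* Regenerate the kernel's crc16_table: reflected polynomial 0xA001. */
static void crc16_table_init(void)
{
	for (int i = 0; i < 256; i++) {
		uint16_t crc = i;
		for (int j = 0; j < 8; j++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
		table[i] = crc;		/* table[1] == 0xC0C1, matching above */
	}
}

/* Identical to the loop body now open-coded in crc16(). */
static uint16_t crc16_update(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = (crc >> 8) ^ table[(crc & 0xff) ^ *p++];
	return crc;
}
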
diff --git a/lib/crc32.c b/lib/crc32.c
index fddd424ff224..95429861d3ac 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
* cleaned up code to current version of sparse and added the slicing-by-8
@@ -19,9 +20,6 @@
* drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
* fs/jffs2 uses seed 0, doesn't xor with ~0.
* fs/partitions/efi.c uses seed ~0, xor's with ~0.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
/* see: Documentation/staging/crc32.rst for a description of algorithms */
@@ -119,12 +117,6 @@ u32 crc32_le_shift(u32 crc, size_t len)
}
EXPORT_SYMBOL(crc32_le_shift);
-u32 crc32c_shift(u32 crc, size_t len)
-{
- return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
-}
-EXPORT_SYMBOL(crc32c_shift);
-
u32 crc32_be_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
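
The removed crc32c_shift() had lost its users; the surviving crc32_le_shift(crc, len) computes the CRC as if len zero bytes followed the data already summarized by crc, which is what makes CRCs of split buffers combinable. A sketch of that identity (this mirrors the crc32_le_combine() helper in include/linux/crc32.h):

/*
 * If crc1 = crc32_le(seed, A, len1) and crc2 = crc32_le(0, B, len2),
 * then one pass over the concatenation A||B is equivalent to:
 *
 *   crc32_le(seed, A||B, len1 + len2) == crc32_le_shift(crc1, len2) ^ crc2
 */
static inline u32 crc32_combine_sketch(u32 crc1, u32 crc2, size_t len2)
{
	return crc32_le_shift(crc1, len2) ^ crc2;
}
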
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 798972b29b68..1ec1466108cc 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -50,22 +50,16 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
+ default CRYPTO_LIB_CHACHA if !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_UTILS
help
- This symbol can be depended upon by arch implementations of the
- ChaCha library interface that require the generic code as a
- fallback, e.g., for SIMD implementations. If no arch specific
- implementation is enabled, this implementation serves the users
- of CRYPTO_LIB_CHACHA.
-
-config CRYPTO_LIB_CHACHA_INTERNAL
- tristate
- select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ This symbol can be selected by arch implementations of the ChaCha
+ library interface that require the generic code as a fallback, e.g.,
+ for SIMD implementations. If no arch specific implementation is
+ enabled, this implementation serves the users of CRYPTO_LIB_CHACHA.
config CRYPTO_LIB_CHACHA
tristate
- select CRYPTO
- select CRYPTO_LIB_CHACHA_INTERNAL
help
Enable the ChaCha library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
@@ -120,21 +114,15 @@ config CRYPTO_ARCH_HAVE_LIB_POLY1305
config CRYPTO_LIB_POLY1305_GENERIC
tristate
+ default CRYPTO_LIB_POLY1305 if !CRYPTO_ARCH_HAVE_LIB_POLY1305
help
- This symbol can be depended upon by arch implementations of the
- Poly1305 library interface that require the generic code as a
- fallback, e.g., for SIMD implementations. If no arch specific
- implementation is enabled, this implementation serves the users
- of CRYPTO_LIB_POLY1305.
-
-config CRYPTO_LIB_POLY1305_INTERNAL
- tristate
- select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ This symbol can be selected by arch implementations of the Poly1305
+ library interface that require the generic code as a fallback, e.g.,
+ for SIMD implementations. If no arch specific implementation is
+ enabled, this implementation serves the users of CRYPTO_LIB_POLY1305.
config CRYPTO_LIB_POLY1305
tristate
- select CRYPTO
- select CRYPTO_LIB_POLY1305_INTERNAL
help
Enable the Poly1305 library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
@@ -151,5 +139,62 @@ config CRYPTO_LIB_SHA1
config CRYPTO_LIB_SHA256
tristate
+ help
+ Enable the SHA-256 library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_ARCH_HAVE_LIB_SHA256
+ bool
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the SHA-256 library interface.
+
+config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
+ bool
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the SHA-256 library interface
+ that is SIMD-based and therefore not usable in hardirq
+ context.
+
+config CRYPTO_LIB_SHA256_GENERIC
+ tristate
+ default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256
+ help
+ This symbol can be selected by arch implementations of the SHA-256
+ library interface that require the generic code as a fallback, e.g.,
+ for SIMD implementations. If no arch specific implementation is
+ enabled, this implementation serves the users of CRYPTO_LIB_SHA256.
+
+config CRYPTO_LIB_SM3
+ tristate
+
+if !KMSAN # avoid false positives from assembly
+if ARM
+source "arch/arm/lib/crypto/Kconfig"
+endif
+if ARM64
+source "arch/arm64/lib/crypto/Kconfig"
+endif
+if MIPS
+source "arch/mips/lib/crypto/Kconfig"
+endif
+if PPC
+source "arch/powerpc/lib/crypto/Kconfig"
+endif
+if RISCV
+source "arch/riscv/lib/crypto/Kconfig"
+endif
+if S390
+source "arch/s390/lib/crypto/Kconfig"
+endif
+if SPARC
+source "arch/sparc/lib/crypto/Kconfig"
+endif
+if X86
+source "arch/x86/lib/crypto/Kconfig"
+endif
+endif
endmenu
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 01fac1cd05a1..3e79283b617d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -25,9 +25,11 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
obj-y += libblake2s.o
libblake2s-y := blake2s.o
libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
+libblake2s-$(CONFIG_CRYPTO_SELFTESTS) += blake2s-selftest.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
libchacha20poly1305-y += chacha20poly1305.o
+libchacha20poly1305-$(CONFIG_CRYPTO_SELFTESTS) += chacha20poly1305-selftest.o
obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o
libcurve25519-generic-y := curve25519-fiat32.o
@@ -36,27 +38,31 @@ libcurve25519-generic-y += curve25519-generic.o
obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
libcurve25519-y += curve25519.o
+libcurve25519-$(CONFIG_CRYPTO_SELFTESTS) += curve25519-selftest.o
obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
libdes-y := des.o
-obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o
-libpoly1305-y := poly1305-donna32.o
-libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
+obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o
libpoly1305-y += poly1305.o
+obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305-generic.o
+libpoly1305-generic-y := poly1305-donna32.o
+libpoly1305-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
+libpoly1305-generic-y += poly1305-generic.o
+
obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o
libsha1-y := sha1.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
-ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
-libblake2s-y += blake2s-selftest.o
-libchacha20poly1305-y += chacha20poly1305-selftest.o
-libcurve25519-y += curve25519-selftest.o
-endif
+obj-$(CONFIG_CRYPTO_LIB_SHA256_GENERIC) += libsha256-generic.o
+libsha256-generic-y := sha256-generic.o
obj-$(CONFIG_MPILIB) += mpi/
-obj-$(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) += simd.o
+obj-$(CONFIG_CRYPTO_SELFTESTS) += simd.o
+
+obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
+libsm3-y := sm3.o
diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c
index 749dc1258a44..437613265e14 100644
--- a/lib/crypto/aescfb.c
+++ b/lib/crypto/aescfb.c
@@ -99,7 +99,7 @@ MODULE_DESCRIPTION("Generic AES-CFB library");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL");
-#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
/*
* Test code below. Vectors taken from crypto/testmgr.h
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 902e49410aaf..277824d6b4af 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -199,7 +199,7 @@ MODULE_DESCRIPTION("Generic AES-GCM library");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL");
-#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
/*
* Test code below. Vectors taken from crypto/testmgr.h
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 71a316552cc5..b0f9a678300b 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(blake2s_final);
static int __init blake2s_mod_init(void)
{
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) &&
WARN_ON(!blake2s_selftest()))
return -ENODEV;
return 0;
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
index 3cdda3b5ee06..ced87dd31a97 100644
--- a/lib/crypto/chacha.c
+++ b/lib/crypto/chacha.c
@@ -13,8 +13,9 @@
#include <linux/unaligned.h>
#include <crypto/chacha.h>
-static void chacha_permute(u32 *x, int nrounds)
+static void chacha_permute(struct chacha_state *state, int nrounds)
{
+ u32 *x = state->x;
int i;
/* whitelist the allowed round counts */
@@ -65,34 +66,34 @@ static void chacha_permute(u32 *x, int nrounds)
/**
* chacha_block_generic - generate one keystream block and increment block counter
- * @state: input state matrix (16 32-bit words)
- * @stream: output keystream block (64 bytes)
+ * @state: input state matrix
+ * @out: output keystream block
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE], int nrounds)
{
- u32 x[16];
+ struct chacha_state permuted_state = *state;
int i;
- memcpy(x, state, 64);
+ chacha_permute(&permuted_state, nrounds);
- chacha_permute(x, nrounds);
+ for (i = 0; i < ARRAY_SIZE(state->x); i++)
+ put_unaligned_le32(permuted_state.x[i] + state->x[i],
+ &out[i * sizeof(u32)]);
- for (i = 0; i < ARRAY_SIZE(x); i++)
- put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
-
- state[12]++;
+ state->x[12]++;
}
EXPORT_SYMBOL(chacha_block_generic);
/**
* hchacha_block_generic - abbreviated ChaCha core, for XChaCha
- * @state: input state matrix (16 32-bit words)
- * @stream: output (8 32-bit words)
+ * @state: input state matrix
+ * @out: the output words
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
@@ -100,15 +101,14 @@ EXPORT_SYMBOL(chacha_block_generic);
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
-void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
- u32 x[16];
-
- memcpy(x, state, 64);
+ struct chacha_state permuted_state = *state;
- chacha_permute(x, nrounds);
+ chacha_permute(&permuted_state, nrounds);
- memcpy(&stream[0], &x[0], 16);
- memcpy(&stream[4], &x[12], 16);
+ memcpy(&out[0], &permuted_state.x[0], 16);
+ memcpy(&out[4], &permuted_state.x[12], 16);
}
EXPORT_SYMBOL(hchacha_block_generic);
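
For readers tracking the index arithmetic above: the code assumes the RFC 7539 state layout, which explains both the state->x[12]++ (per-block counter) and the two 16-byte copies in hchacha (words 0-3 and 12-15). A sketch of that layout; the real definition lives in <crypto/chacha.h>:

/* RFC 7539 ChaCha state, as assumed by the code above (a sketch). */
struct chacha_state {
	u32 x[16];
	/*
	 * x[0..3]    constants "expa" "nd 3" "2-by" "te k"
	 * x[4..11]   256-bit key, as little-endian words
	 * x[12]      32-bit block counter -- hence state->x[12]++ per block
	 * x[13..15]  96-bit nonce
	 */
};
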
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
index 2ea61c28be4f..e4c85bc5a6d7 100644
--- a/lib/crypto/chacha20poly1305-selftest.c
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -8832,7 +8832,7 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
- u32 chacha20_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha20_state;
union {
u8 block0[POLY1305_KEY_SIZE];
__le64 lens[2];
@@ -8844,12 +8844,12 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
memcpy(&bottom_row[4], nonce, 12);
for (i = 0; i < 8; ++i)
le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i);
- chacha_init(chacha20_state, le_key, bottom_row);
- chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0));
+ chacha_init(&chacha20_state, le_key, bottom_row);
+ chacha20_crypt(&chacha20_state, b.block0, b.block0, sizeof(b.block0));
poly1305_init(&poly1305_state, b.block0);
poly1305_update(&poly1305_state, ad, ad_len);
poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);
- chacha20_crypt(chacha20_state, dst, src, src_len);
+ chacha20_crypt(&chacha20_state, dst, src, src_len);
poly1305_update(&poly1305_state, dst, src_len);
poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);
b.lens[0] = cpu_to_le64(ad_len);
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index 9cfa886f1f89..e29eed49a5a1 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -18,8 +18,6 @@
#include <linux/mm.h>
#include <linux/module.h>
-#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
-
static void chacha_load_key(u32 *k, const u8 *in)
{
k[0] = get_unaligned_le32(in);
@@ -32,7 +30,8 @@ static void chacha_load_key(u32 *k, const u8 *in)
k[7] = get_unaligned_le32(in + 28);
}
-static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
+static void xchacha_init(struct chacha_state *chacha_state,
+ const u8 *key, const u8 *nonce)
{
u32 k[CHACHA_KEY_WORDS];
u8 iv[CHACHA_IV_SIZE];
@@ -54,7 +53,8 @@ static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
static void
__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
- const u8 *ad, const size_t ad_len, u32 *chacha_state)
+ const u8 *ad, const size_t ad_len,
+ struct chacha_state *chacha_state)
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
@@ -82,7 +82,7 @@ __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
poly1305_final(&poly1305_state, dst + src_len);
- memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32));
+ chacha_zeroize_state(chacha_state);
memzero_explicit(&b, sizeof(b));
}
@@ -91,7 +91,7 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u32 k[CHACHA_KEY_WORDS];
__le64 iv[2];
@@ -100,8 +100,9 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
iv[0] = 0;
iv[1] = cpu_to_le64(nonce);
- chacha_init(chacha_state, k, (u8 *)iv);
- __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+ chacha_init(&chacha_state, k, (u8 *)iv);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
+ &chacha_state);
memzero_explicit(iv, sizeof(iv));
memzero_explicit(k, sizeof(k));
@@ -113,16 +114,18 @@ void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
- xchacha_init(chacha_state, key, nonce);
- __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+ xchacha_init(&chacha_state, key, nonce);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
+ &chacha_state);
}
EXPORT_SYMBOL(xchacha20poly1305_encrypt);
static bool
__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
- const u8 *ad, const size_t ad_len, u32 *chacha_state)
+ const u8 *ad, const size_t ad_len,
+ struct chacha_state *chacha_state)
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
@@ -169,7 +172,7 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u32 k[CHACHA_KEY_WORDS];
__le64 iv[2];
bool ret;
@@ -179,11 +182,11 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
iv[0] = 0;
iv[1] = cpu_to_le64(nonce);
- chacha_init(chacha_state, k, (u8 *)iv);
+ chacha_init(&chacha_state, k, (u8 *)iv);
ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
- chacha_state);
+ &chacha_state);
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
memzero_explicit(iv, sizeof(iv));
memzero_explicit(k, sizeof(k));
return ret;
@@ -195,11 +198,11 @@ bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
- xchacha_init(chacha_state, key, nonce);
+ xchacha_init(&chacha_state, key, nonce);
return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
- chacha_state);
+ &chacha_state);
}
EXPORT_SYMBOL(xchacha20poly1305_decrypt);
@@ -213,7 +216,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
struct sg_mapping_iter miter;
size_t partial = 0;
unsigned int flags;
@@ -240,8 +243,8 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
b.iv[0] = 0;
b.iv[1] = cpu_to_le64(nonce);
- chacha_init(chacha_state, b.k, (u8 *)b.iv);
- chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ chacha_init(&chacha_state, b.k, (u8 *)b.iv);
+ chacha20_crypt(&chacha_state, b.block0, pad0, sizeof(b.block0));
poly1305_init(&poly1305_state, b.block0);
if (unlikely(ad_len)) {
@@ -276,13 +279,13 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
if (unlikely(length < sl))
l &= ~(CHACHA_BLOCK_SIZE - 1);
- chacha20_crypt(chacha_state, addr, addr, l);
+ chacha20_crypt(&chacha_state, addr, addr, l);
addr += l;
length -= l;
}
if (unlikely(length > 0)) {
- chacha20_crypt(chacha_state, b.chacha_stream, pad0,
+ chacha20_crypt(&chacha_state, b.chacha_stream, pad0,
CHACHA_BLOCK_SIZE);
crypto_xor(addr, b.chacha_stream, length);
partial = length;
@@ -323,7 +326,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
!crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE);
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
memzero_explicit(&b, sizeof(b));
return ret;
@@ -355,7 +358,7 @@ EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
static int __init chacha20poly1305_init(void)
{
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) &&
WARN_ON(!chacha20poly1305_selftest()))
return -ENODEV;
return 0;
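
From the caller's side, the interface is unchanged by the struct chacha_state conversion. A usage sketch (buffer sizing is the part callers most often get wrong, so it is spelled out in the comments):

#include <crypto/chacha20poly1305.h>

/* dst must have room for src_len + CHACHA20POLY1305_AUTHTAG_SIZE (16)
 * bytes; the Poly1305 tag is appended after the ciphertext. */
static void seal_sketch(u8 *dst, const u8 *src, size_t src_len,
			const u8 *ad, size_t ad_len, u64 nonce,
			const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, nonce, key);
}

/* src_len includes the trailing tag; returns false if authentication
 * fails, in which case dst must not be used. */
static bool open_sketch(u8 *dst, const u8 *src, size_t src_len,
			const u8 *ad, size_t ad_len, u64 nonce,
			const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	return chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
					nonce, key);
}
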
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
index 064b352c6907..6850b76a80c9 100644
--- a/lib/crypto/curve25519.c
+++ b/lib/crypto/curve25519.c
@@ -15,7 +15,7 @@
static int __init curve25519_init(void)
{
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) &&
WARN_ON(!curve25519_selftest()))
return -ENODEV;
return 0;
diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c
index cc1be0496eb9..ebcca381e248 100644
--- a/lib/crypto/libchacha.c
+++ b/lib/crypto/libchacha.c
@@ -12,7 +12,7 @@
#include <crypto/algapi.h> // for crypto_xor_cpy
#include <crypto/chacha.h>
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
/* aligned to potentially speed up crypto_xor() */
diff --git a/lib/crypto/poly1305-generic.c b/lib/crypto/poly1305-generic.c
new file mode 100644
index 000000000000..a73f700fa1fb
--- /dev/null
+++ b/lib/crypto/poly1305-generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Poly1305 authenticator algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
+ */
+
+#include <crypto/internal/poly1305.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+void poly1305_block_init_generic(struct poly1305_block_state *desc,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ poly1305_core_init(&desc->h);
+ poly1305_core_setkey(&desc->core_r, raw_key);
+}
+EXPORT_SYMBOL_GPL(poly1305_block_init_generic);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Poly1305 algorithm (generic implementation)");
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 6e80214ebad8..5f2f2af3b59f 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -7,72 +7,67 @@
* Based on public domain code by Andrew Moon and Daniel J. Bernstein.
*/
+#include <crypto/internal/blockhash.h>
#include <crypto/internal/poly1305.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <linux/unaligned.h>
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE])
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE])
{
- poly1305_core_setkey(&desc->core_r, key);
desc->s[0] = get_unaligned_le32(key + 16);
desc->s[1] = get_unaligned_le32(key + 20);
desc->s[2] = get_unaligned_le32(key + 24);
desc->s[3] = get_unaligned_le32(key + 28);
- poly1305_core_init(&desc->h);
desc->buflen = 0;
- desc->sset = true;
- desc->rset = 2;
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_block_init_arch(&desc->state, key);
+ else
+ poly1305_block_init_generic(&desc->state, key);
}
-EXPORT_SYMBOL_GPL(poly1305_init_generic);
+EXPORT_SYMBOL(poly1305_init);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes)
+static inline void poly1305_blocks(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len)
{
- unsigned int bytes;
-
- if (unlikely(desc->buflen)) {
- bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen);
- memcpy(desc->buf + desc->buflen, src, bytes);
- src += bytes;
- nbytes -= bytes;
- desc->buflen += bytes;
-
- if (desc->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf,
- 1, 1);
- desc->buflen = 0;
- }
- }
-
- if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- poly1305_core_blocks(&desc->h, &desc->core_r, src,
- nbytes / POLY1305_BLOCK_SIZE, 1);
- src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
- nbytes %= POLY1305_BLOCK_SIZE;
- }
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_blocks_arch(state, src, len, 1);
+ else
+ poly1305_blocks_generic(state, src, len, 1);
+}
- if (unlikely(nbytes)) {
- desc->buflen = nbytes;
- memcpy(desc->buf, src, nbytes);
- }
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes)
+{
+ desc->buflen = BLOCK_HASH_UPDATE(poly1305_blocks, &desc->state,
+ src, nbytes, POLY1305_BLOCK_SIZE,
+ desc->buf, desc->buflen);
}
-EXPORT_SYMBOL_GPL(poly1305_update_generic);
+EXPORT_SYMBOL(poly1305_update);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *dst)
{
if (unlikely(desc->buflen)) {
desc->buf[desc->buflen++] = 1;
memset(desc->buf + desc->buflen, 0,
POLY1305_BLOCK_SIZE - desc->buflen);
- poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0);
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_blocks_arch(&desc->state, desc->buf,
+ POLY1305_BLOCK_SIZE, 0);
+ else
+ poly1305_blocks_generic(&desc->state, desc->buf,
+ POLY1305_BLOCK_SIZE, 0);
}
- poly1305_core_emit(&desc->h, desc->s, dst);
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_emit_arch(&desc->state.h, dst, desc->s);
+ else
+ poly1305_emit_generic(&desc->state.h, dst, desc->s);
*desc = (struct poly1305_desc_ctx){};
}
-EXPORT_SYMBOL_GPL(poly1305_final_generic);
+EXPORT_SYMBOL(poly1305_final);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
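
With this change, poly1305_init/update/final become the single entry points, dispatching at compile time between the arch and generic block functions via IS_ENABLED(). The caller's view (a sketch against the declarations in <crypto/poly1305.h>):

#include <crypto/poly1305.h>

static void poly1305_mac_sketch(u8 mac[POLY1305_DIGEST_SIZE],
				const u8 key[POLY1305_KEY_SIZE],
				const u8 *msg, unsigned int len)
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);	/* arch or generic, chosen above */
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, mac);	/* emits 16 bytes, wipes desc */
}
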
diff --git a/lib/crypto/sha256-generic.c b/lib/crypto/sha256-generic.c
new file mode 100644
index 000000000000..a16ad4f25ebb
--- /dev/null
+++ b/lib/crypto/sha256-generic.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256, as specified in
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ *
+ * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2014 Red Hat Inc.
+ */
+
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
+
+static const u32 SHA256_K[] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+static inline u32 Ch(u32 x, u32 y, u32 z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline u32 Maj(u32 x, u32 y, u32 z)
+{
+ return (x & y) | (z & (x | y));
+}
+
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+ W[I] = get_unaligned_be32((__u32 *)input + I);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+ W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+}
+
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
+ u32 t1, t2; \
+ t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
+ t2 = e0(a) + Maj(a, b, c); \
+ d += t1; \
+ h = t1 + t2; \
+} while (0)
+
+static void sha256_block_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *input, u32 W[64])
+{
+ u32 a, b, c, d, e, f, g, h;
+ int i;
+
+ /* load the input */
+ for (i = 0; i < 16; i += 8) {
+ LOAD_OP(i + 0, W, input);
+ LOAD_OP(i + 1, W, input);
+ LOAD_OP(i + 2, W, input);
+ LOAD_OP(i + 3, W, input);
+ LOAD_OP(i + 4, W, input);
+ LOAD_OP(i + 5, W, input);
+ LOAD_OP(i + 6, W, input);
+ LOAD_OP(i + 7, W, input);
+ }
+
+ /* now blend */
+ for (i = 16; i < 64; i += 8) {
+ BLEND_OP(i + 0, W);
+ BLEND_OP(i + 1, W);
+ BLEND_OP(i + 2, W);
+ BLEND_OP(i + 3, W);
+ BLEND_OP(i + 4, W);
+ BLEND_OP(i + 5, W);
+ BLEND_OP(i + 6, W);
+ BLEND_OP(i + 7, W);
+ }
+
+ /* load the state into our registers */
+ a = state[0]; b = state[1]; c = state[2]; d = state[3];
+ e = state[4]; f = state[5]; g = state[6]; h = state[7];
+
+ /* now iterate */
+ for (i = 0; i < 64; i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+ state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+}
+
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ u32 W[64];
+
+ do {
+ sha256_block_generic(state, data, W);
+ data += SHA256_BLOCK_SIZE;
+ } while (--nblocks);
+
+ memzero_explicit(W, sizeof(W));
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_generic);
+
+MODULE_DESCRIPTION("SHA-256 Algorithm (generic implementation)");
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 04c1f2557e6c..107e5162507a 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -11,151 +11,65 @@
* Copyright (c) 2014 Red Hat Inc.
*/
-#include <linux/unaligned.h>
-#include <crypto/sha256_base.h>
+#include <crypto/internal/blockhash.h>
+#include <crypto/internal/sha2.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-static const u32 SHA256_K[] = {
- 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
- 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
- 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
- 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
- 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
- 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
- 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
- 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
- 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
- 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
- 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
- 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
- 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
- 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
- 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
- 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
-};
+/*
+ * If __DISABLE_EXPORTS is defined, then this file is being compiled for a
+ * pre-boot environment. In that case, ignore the kconfig options, pull the
+ * generic code into the same translation unit, and use that only.
+ */
+#ifdef __DISABLE_EXPORTS
+#include "sha256-generic.c"
+#endif
-static inline u32 Ch(u32 x, u32 y, u32 z)
+static inline bool sha256_purgatory(void)
{
- return z ^ (x & (y ^ z));
+ return __is_defined(__DISABLE_EXPORTS);
}
-static inline u32 Maj(u32 x, u32 y, u32 z)
+static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data,
+ size_t nblocks)
{
- return (x & y) | (z & (x | y));
+ sha256_choose_blocks(state, data, nblocks, sha256_purgatory(), false);
}
-#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
-#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
-#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
-#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
-
-static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len)
{
- W[I] = get_unaligned_be32((__u32 *)input + I);
-}
+ size_t partial = sctx->count % SHA256_BLOCK_SIZE;
-static inline void BLEND_OP(int I, u32 *W)
-{
- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
-}
-
-#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
- u32 t1, t2; \
- t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
- t2 = e0(a) + Maj(a, b, c); \
- d += t1; \
- h = t1 + t2; \
-} while (0)
-
-static void sha256_transform(u32 *state, const u8 *input, u32 *W)
-{
- u32 a, b, c, d, e, f, g, h;
- int i;
-
- /* load the input */
- for (i = 0; i < 16; i += 8) {
- LOAD_OP(i + 0, W, input);
- LOAD_OP(i + 1, W, input);
- LOAD_OP(i + 2, W, input);
- LOAD_OP(i + 3, W, input);
- LOAD_OP(i + 4, W, input);
- LOAD_OP(i + 5, W, input);
- LOAD_OP(i + 6, W, input);
- LOAD_OP(i + 7, W, input);
- }
-
- /* now blend */
- for (i = 16; i < 64; i += 8) {
- BLEND_OP(i + 0, W);
- BLEND_OP(i + 1, W);
- BLEND_OP(i + 2, W);
- BLEND_OP(i + 3, W);
- BLEND_OP(i + 4, W);
- BLEND_OP(i + 5, W);
- BLEND_OP(i + 6, W);
- BLEND_OP(i + 7, W);
- }
-
- /* load the state into our registers */
- a = state[0]; b = state[1]; c = state[2]; d = state[3];
- e = state[4]; f = state[5]; g = state[6]; h = state[7];
-
- /* now iterate */
- for (i = 0; i < 64; i += 8) {
- SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
- SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
- SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
- SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
- SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
- SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
- SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
- SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
- }
-
- state[0] += a; state[1] += b; state[2] += c; state[3] += d;
- state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-}
-
-static void sha256_transform_blocks(struct sha256_state *sctx,
- const u8 *input, int blocks)
-{
- u32 W[64];
-
- do {
- sha256_transform(sctx->state, input, W);
- input += SHA256_BLOCK_SIZE;
- } while (--blocks);
-
- memzero_explicit(W, sizeof(W));
-}
-
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
-{
- lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks);
+ sctx->count += len;
+ BLOCK_HASH_UPDATE_BLOCKS(sha256_blocks, sctx->ctx.state, data, len,
+ SHA256_BLOCK_SIZE, sctx->buf, partial);
}
EXPORT_SYMBOL(sha256_update);
-static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size)
+static inline void __sha256_final(struct sha256_state *sctx, u8 *out,
+ size_t digest_size)
{
- lib_sha256_base_do_finalize(sctx, sha256_transform_blocks);
- lib_sha256_base_finish(sctx, out, digest_size);
+ size_t partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size,
+ sha256_purgatory(), false);
+ memzero_explicit(sctx, sizeof(*sctx));
}
-void sha256_final(struct sha256_state *sctx, u8 *out)
+void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE])
{
- __sha256_final(sctx, out, 32);
+ __sha256_final(sctx, out, SHA256_DIGEST_SIZE);
}
EXPORT_SYMBOL(sha256_final);
-void sha224_final(struct sha256_state *sctx, u8 *out)
+void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE])
{
- __sha256_final(sctx, out, 28);
+ __sha256_final(sctx, out, SHA224_DIGEST_SIZE);
}
EXPORT_SYMBOL(sha224_final);
-void sha256(const u8 *data, unsigned int len, u8 *out)
+void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE])
{
struct sha256_state sctx;
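
The file is now a thin layer: buffering and length bookkeeping here, block processing chosen by sha256_choose_blocks(). From the caller's side, the one-shot and incremental paths still look like this (a sketch against the usual <crypto/sha2.h> declarations):

#include <crypto/sha2.h>

static void sha256_usage_sketch(const u8 *data, size_t len)
{
	struct sha256_state sctx;
	u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];

	sha256(data, len, d1);		/* one-shot */

	sha256_init(&sctx);		/* incremental: same digest in d2 */
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, d2);
}
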
diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c
new file mode 100644
index 000000000000..efff0e267d84
--- /dev/null
+++ b/lib/crypto/sm3.c
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
+ * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <crypto/sm3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
+
+static const u32 ____cacheline_aligned K[64] = {
+ 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
+ 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
+ 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
+ 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
+ 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
+ 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
+ 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
+ 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+};
+
+/*
+ * Transform the message X which consists of 16 32-bit-words. See
+ * GM/T 004-2012 for details.
+ */
+#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
+ do { \
+ ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
+ ss2 = ss1 ^ rol32((a), 12); \
+ d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
+ h += GG ## i(e, f, g) + ss1 + (w1); \
+ b = rol32((b), 9); \
+ f = rol32((f), 19); \
+ h = P0((h)); \
+ } while (0)
+
+#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(1, a, b, c, d, e, f, g, h, t, w1, w2)
+#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(2, a, b, c, d, e, f, g, h, t, w1, w2)
+
+#define FF1(x, y, z) (x ^ y ^ z)
+#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
+
+#define GG1(x, y, z) FF1(x, y, z)
+#define GG2(x, y, z) ((x & y) | (~x & z))
+
+/* Message expansion */
+#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
+#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
+#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
+#define W1(i) (W[i & 0x0f])
+#define W2(i) (W[i & 0x0f] = \
+ P1(W[i & 0x0f] \
+ ^ W[(i-9) & 0x0f] \
+ ^ rol32(W[(i-3) & 0x0f], 15)) \
+ ^ rol32(W[(i-13) & 0x0f], 7) \
+ ^ W[(i-6) & 0x0f])
+
+static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
+{
+ u32 a, b, c, d, e, f, g, h, ss1, ss2;
+
+ a = sctx->state[0];
+ b = sctx->state[1];
+ c = sctx->state[2];
+ d = sctx->state[3];
+ e = sctx->state[4];
+ f = sctx->state[5];
+ g = sctx->state[6];
+ h = sctx->state[7];
+
+ R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
+ R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
+ R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
+ R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
+ R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
+ R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
+ R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
+ R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
+ R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
+ R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
+ R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
+ R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
+ R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
+ R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
+ R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
+ R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
+
+ R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
+ R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
+ R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
+ R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
+ R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
+ R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
+ R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
+ R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
+ R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
+ R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
+ R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
+ R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
+ R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
+ R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
+ R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
+ R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
+
+ R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
+ R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
+ R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
+ R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
+ R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
+ R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
+ R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
+ R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
+ R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
+ R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
+ R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
+ R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
+ R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
+ R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
+ R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
+ R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
+
+ R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
+ R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
+ R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
+ R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
+ R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
+ R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
+ R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
+ R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
+ R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
+ R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
+ R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
+ R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
+ R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
+ R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
+ R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
+ R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
+
+ sctx->state[0] ^= a;
+ sctx->state[1] ^= b;
+ sctx->state[2] ^= c;
+ sctx->state[3] ^= d;
+ sctx->state[4] ^= e;
+ sctx->state[5] ^= f;
+ sctx->state[6] ^= g;
+ sctx->state[7] ^= h;
+}
+#undef R
+#undef R1
+#undef R2
+#undef I
+#undef W1
+#undef W2
+
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks)
+{
+ u32 W[16];
+
+ do {
+ sm3_transform(sctx, data, W);
+ data += SM3_BLOCK_SIZE;
+ } while (--blocks);
+
+ memzero_explicit(W, sizeof(W));
+}
+EXPORT_SYMBOL_GPL(sm3_block_generic);
+
+MODULE_DESCRIPTION("Generic SM3 library");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/devres.c b/lib/devres.c
index 73901160197e..378b07730420 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -206,6 +206,7 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
{
return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}
+EXPORT_SYMBOL(devm_ioremap_resource_wc);
/*
* devm_of_iomap - Requests a resource and maps the memory mapped IO
diff --git a/lib/errseq.c b/lib/errseq.c
index 93e9b94358dc..13a2581c5a87 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -34,11 +34,14 @@
*/
/* The low bits are designated for error code (max of MAX_ERRNO) */
-#define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1)
+#define ERRSEQ_SHIFT (ilog2(MAX_ERRNO) + 1)
/* This bit is used as a flag to indicate whether the value has been seen */
#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
+/* Derive the errno mask from ERRSEQ_SEEN rather than from MAX_ERRNO */
+#define ERRNO_MASK (ERRSEQ_SEEN - 1)
+
/* The lowest bit of the counter */
#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
@@ -60,8 +63,6 @@ errseq_t errseq_set(errseq_t *eseq, int err)
{
errseq_t cur, old;
- /* MAX_ERRNO must be able to serve as a mask */
- BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
/*
* Ensure the error code actually fits where we want it to go. If it
@@ -79,7 +80,7 @@ errseq_t errseq_set(errseq_t *eseq, int err)
errseq_t new;
/* Clear out error bits and set new error */
- new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+ new = (old & ~(ERRNO_MASK | ERRSEQ_SEEN)) | -err;
/* Only increment if someone has looked at it */
if (old & ERRSEQ_SEEN)
@@ -148,7 +149,7 @@ int errseq_check(errseq_t *eseq, errseq_t since)
if (likely(cur == since))
return 0;
- return -(cur & MAX_ERRNO);
+ return -(cur & ERRNO_MASK);
}
EXPORT_SYMBOL(errseq_check);
@@ -200,7 +201,7 @@ int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
if (new != old)
cmpxchg(eseq, old, new);
*since = new;
- err = -(new & MAX_ERRNO);
+ err = -(new & ERRNO_MASK);
}
return err;
}
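
Why the shift change is safe, worked out for the current MAX_ERRNO of 4095:

/*
 * old: ERRSEQ_SHIFT = ilog2(MAX_ERRNO + 1) = ilog2(4096)  = 12
 *      -- only correct because 4096 is a power of two, hence the
 *         BUILD_BUG_ON_NOT_POWER_OF_2() that can now be dropped
 * new: ERRSEQ_SHIFT = ilog2(MAX_ERRNO) + 1 = 11 + 1       = 12
 *      -- the bit width of MAX_ERRNO, correct for any value
 *
 * ERRSEQ_SEEN    = 1 << 12          = 0x1000
 * ERRNO_MASK     = ERRSEQ_SEEN - 1  = 0x0fff  (replaces masking with MAX_ERRNO)
 * ERRSEQ_CTR_INC = 1 << 13          = 0x2000
 *
 * An errseq_t thus packs [ counter | seen | -errno ], and the mask no
 * longer silently depends on MAX_ERRNO having the form 2^n - 1.
 */
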
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 0836bb3d76c5..06b6342aa3ae 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -117,6 +117,17 @@ EXPORT_SYMBOL(_find_first_and_bit);
#endif
/*
+ * Find the first bit set in 1st memory region and unset in 2nd.
+ */
+unsigned long _find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ return FIND_FIRST_BIT(addr1[idx] & ~addr2[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_andnot_bit);
+
+/*
* Find the first set bit in three memory regions.
*/
unsigned long _find_first_and_and_bit(const unsigned long *addr1,
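
Expanded by hand, the FIND_FIRST_BIT() instance added above behaves like the following word-wise scan (a sketch of the macro's effect, not the kernel implementation):

static unsigned long find_first_andnot_sketch(const unsigned long *addr1,
					      const unsigned long *addr2,
					      unsigned long size)
{
	unsigned long idx;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		unsigned long val = addr1[idx] & ~addr2[idx];

		if (val)	/* set in addr1, clear in addr2 */
			return min(idx * BITS_PER_LONG + __ffs(val), size);
	}
	return size;	/* no such bit: return size, like its siblings */
}
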
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index bc9391e55d57..d9e19fb2dcf3 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1059,22 +1059,22 @@ static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa
pgoff_t index, unsigned int nr_pages)
{
XA_STATE(xas, xa, index);
- struct page *page;
+ struct folio *folio;
unsigned int ret = 0;
rcu_read_lock();
- for (page = xas_load(&xas); page; page = xas_next(&xas)) {
- if (xas_retry(&xas, page))
+ for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+ if (xas_retry(&xas, folio))
continue;
- /* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
+ /* Has the folio moved or been split? */
+ if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
- pages[ret] = find_subpage(page, xas.xa_index);
- get_page(pages[ret]);
+ pages[ret] = folio_file_page(folio, xas.xa_index);
+ folio_get(folio);
if (++ret == nr_pages)
break;
}
@@ -1650,11 +1650,11 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
- struct page *page, **p;
+ struct page **p;
+ struct folio *folio;
unsigned int nr = 0, offset;
loff_t pos = i->xarray_start + i->iov_offset;
- pgoff_t index = pos >> PAGE_SHIFT;
- XA_STATE(xas, i->xarray, index);
+ XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
offset = pos & ~PAGE_MASK;
*offset0 = offset;
@@ -1665,17 +1665,17 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
p = *pages;
rcu_read_lock();
- for (page = xas_load(&xas); page; page = xas_next(&xas)) {
- if (xas_retry(&xas, page))
+ for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+ if (xas_retry(&xas, folio))
continue;
- /* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
+ /* Has the folio moved or been split? */
+ if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
- p[nr++] = find_subpage(page, xas.xa_index);
+ p[nr++] = folio_file_page(folio, xas.xa_index);
if (nr == maxpages)
break;
}
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index d586e6af5e5a..bdde40cd69d7 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -351,6 +351,8 @@ int kstrtobool(const char *s, bool *res)
return -EINVAL;
switch (s[0]) {
+ case 'e':
+ case 'E':
case 'y':
case 'Y':
case 't':
@@ -358,6 +360,8 @@ int kstrtobool(const char *s, bool *res)
case '1':
*res = true;
return 0;
+ case 'd':
+ case 'D':
case 'n':
case 'N':
case 'f':
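
The new cases extend the accepted spellings. kstrtobool() dispatches on the first character (checking the second only to tell "on" from "off"), so any "e..."/"d..." string now parses. A usage sketch:

#include <linux/kstrtox.h>

static void kstrtobool_sketch(void)
{
	bool v;

	(void)kstrtobool("enable", &v);	 /* v == true, like "yes"/"true"/"1" */
	(void)kstrtobool("disable", &v); /* v == false, like "no"/"false"/"0" */
	/* unrecognized strings still return -EINVAL and leave v untouched */
}
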
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 3f39955cb0f1..0061d4c7e351 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -177,7 +177,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
const size_t max = suite_set->end - suite_set->start;
- copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
+ copy = kcalloc(max, sizeof(*copy), GFP_KERNEL);
if (!copy) { /* won't be able to run anything, return an empty set */
return filtered;
}
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
index 92b2cccd5e76..484fd85251b4 100644
--- a/lib/kunit/static_stub.c
+++ b/lib/kunit/static_stub.c
@@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test,
/* If the replacement address is NULL, deactivate the stub. */
if (!replacement_addr) {
- kunit_deactivate_static_stub(test, replacement_addr);
+ kunit_deactivate_static_stub(test, real_fn_addr);
return;
}
diff --git a/lib/llist.c b/lib/llist.c
index f21d0cfbbaaa..f574c17a238e 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -14,28 +14,6 @@
#include <linux/export.h>
#include <linux/llist.h>
-
-/**
- * llist_add_batch - add several linked entries in batch
- * @new_first: first entry in batch to be added
- * @new_last: last entry in batch to be added
- * @head: the head for your lock-less list
- *
- * Return whether list is empty before adding.
- */
-bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head)
-{
- struct llist_node *first = READ_ONCE(head->first);
-
- do {
- new_last->next = first;
- } while (!try_cmpxchg(&head->first, &first, new_first));
-
- return !first;
-}
-EXPORT_SYMBOL_GPL(llist_add_batch);
-
/**
* llist_del_first - delete the first entry of lock-less list
* @head: the head for your lock-less list
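
The out-of-line llist_add_batch() is removed here; callers are unaffected (the definition presumably moves to an inline in <linux/llist.h>, which this hunk alone does not show). Its contract, for reference (a usage sketch):

struct item {
	struct llist_node node;
	int v;
};

static bool push_pair_sketch(struct llist_head *head,
			     struct item *a, struct item *b)
{
	a->node.next = &b->node;	/* caller links the batch first..last */
	/* returns true iff the list was empty before the add */
	return llist_add_batch(&a->node, &b->node, head);
}
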
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d0bea23fa4bc..affe979bd14d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -211,14 +211,14 @@ static void ma_free_rcu(struct maple_node *node)
call_rcu(&node->rcu, mt_free_rcu);
}
-static void mas_set_height(struct ma_state *mas)
+static void mt_set_height(struct maple_tree *mt, unsigned char height)
{
- unsigned int new_flags = mas->tree->ma_flags;
+ unsigned int new_flags = mt->ma_flags;
new_flags &= ~MT_FLAGS_HEIGHT_MASK;
- MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
- new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
- mas->tree->ma_flags = new_flags;
+ MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX);
+ new_flags |= height << MT_FLAGS_HEIGHT_OFFSET;
+ mt->ma_flags = new_flags;
}
static unsigned int mas_mt_height(struct ma_state *mas)
@@ -1371,7 +1371,7 @@ retry:
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
- mas->depth = 1;
+ mas->depth = 0;
mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
@@ -1712,9 +1712,10 @@ static inline void mas_adopt_children(struct ma_state *mas,
* node as dead.
* @mas: the maple state with the new node
* @old_enode: The old maple encoded node to replace.
+ * @new_height: The new height of the tree, in case a new root node is inserted
*/
static inline void mas_put_in_tree(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, char new_height)
__must_hold(mas->tree->ma_lock)
{
unsigned char offset;
@@ -1723,7 +1724,7 @@ static inline void mas_put_in_tree(struct ma_state *mas,
if (mte_is_root(mas->node)) {
mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
- mas_set_height(mas);
+ mt_set_height(mas->tree, new_height);
} else {
offset = mte_parent_slot(mas->node);
@@ -1741,12 +1742,13 @@ static inline void mas_put_in_tree(struct ma_state *mas,
* the parent encoding to locate the maple node in the tree.
* @mas: the ma_state with @mas->node pointing to the new node.
* @old_enode: The old maple encoded node.
+ * @new_height: The new height of the tree as a result of the operation
*/
static inline void mas_replace_node(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
__must_hold(mas->tree->ma_lock)
{
- mas_put_in_tree(mas, old_enode);
+ mas_put_in_tree(mas, old_enode, new_height);
mas_free(mas, old_enode);
}
@@ -2536,10 +2538,11 @@ static inline void mas_topiary_node(struct ma_state *mas,
*
* @mas: The maple state pointing at the new data
* @old_enode: The maple encoded node being replaced
+ * @new_height: The new height of the tree as a result of the operation
*
*/
static inline void mas_topiary_replace(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
{
struct ma_state tmp[3], tmp_next[3];
MA_TOPIARY(subtrees, mas->tree);
@@ -2547,7 +2550,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
int i, n;
/* Place data in tree & then mark node as old */
- mas_put_in_tree(mas, old_enode);
+ mas_put_in_tree(mas, old_enode, new_height);
/* Update the parent pointers in the tree */
tmp[0] = *mas;
@@ -2631,14 +2634,15 @@ static inline void mas_topiary_replace(struct ma_state *mas,
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
* @old_enode: The old maple encoded node that is being replaced.
+ * @new_height: The new height of the tree as a result of the operation
*
* Updates gap as necessary.
*/
static inline void mas_wmb_replace(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
{
/* Insert the new data in the tree */
- mas_topiary_replace(mas, old_enode);
+ mas_topiary_replace(mas, old_enode, new_height);
if (mte_is_leaf(mas->node))
return;
@@ -2737,7 +2741,7 @@ static inline bool mast_sufficient(struct maple_subtree_state *mast)
*/
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
- if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
+ if (mast->bn->b_end > mt_slot_count(mast->orig_l->node))
return true;
return false;
@@ -2824,6 +2828,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
{
unsigned char split, mid_split;
unsigned char slot = 0;
+ unsigned char new_height = 0; /* used if node is a new root */
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
struct maple_enode *old_enode;
@@ -2845,8 +2850,6 @@ static void mas_spanning_rebalance(struct ma_state *mas,
unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
mast_spanning_rebalance(mast);
- l_mas.depth = 0;
-
/*
* Each level of the tree is examined and balanced, pushing data to the left or
* right, or rebalancing against left or right nodes is employed to avoid
@@ -2866,6 +2869,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+ new_height++;
/*
* Copy data from next level in the tree to mast->bn from next
@@ -2873,7 +2877,6 @@ static void mas_spanning_rebalance(struct ma_state *mas,
*/
memset(mast->bn, 0, sizeof(struct maple_big_node));
mast->bn->type = mte_node_type(left);
- l_mas.depth++;
/* Root already stored in l->node. */
if (mas_is_root_limits(mast->l))
@@ -2890,11 +2893,21 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast_combine_cp_right(mast);
mast->orig_l->last = mast->orig_l->max;
- if (mast_sufficient(mast))
- continue;
+ if (mast_sufficient(mast)) {
+ if (mast_overflow(mast))
+ continue;
+
+ if (mast->orig_l->node == mast->orig_r->node) {
+ /*
+ * The data in b_node fits in a single
+ * node, so store it in the tree
+ */
+ slot = mast->l->offset;
+ break;
+ }
- if (mast_overflow(mast))
continue;
+ }
/* May be a new root stored in mast->bn */
if (mas_is_root_limits(mast->orig_l))
@@ -2909,8 +2922,9 @@ static void mas_spanning_rebalance(struct ma_state *mas,
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
- l_mas.depth++;
+
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
+ new_height++;
mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
mas_set_parent(mas, middle, l_mas.node, ++slot);
@@ -2933,7 +2947,7 @@ new_root:
mas->min = l_mas.min;
mas->max = l_mas.max;
mas->offset = l_mas.offset;
- mas_wmb_replace(mas, old_enode);
+ mas_wmb_replace(mas, old_enode, new_height);
mtree_range_walk(mas);
return;
}
@@ -3009,6 +3023,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
void __rcu **l_slots, **slots;
unsigned long *l_pivs, *pivs, gap;
bool in_rcu = mt_in_rcu(mas->tree);
+ unsigned char new_height = mas_mt_height(mas);
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
@@ -3103,7 +3118,7 @@ done:
mas_ascend(mas);
if (in_rcu) {
- mas_replace_node(mas, old_eparent);
+ mas_replace_node(mas, old_eparent, new_height);
mas_adopt_children(mas, mas->node);
}
@@ -3114,10 +3129,9 @@ done:
* mas_split_final_node() - Split the final node in a subtree operation.
* @mast: the maple subtree state
* @mas: The maple state
- * @height: The height of the tree in case it's a new root.
*/
static inline void mas_split_final_node(struct maple_subtree_state *mast,
- struct ma_state *mas, int height)
+ struct ma_state *mas)
{
struct maple_enode *ancestor;
@@ -3126,7 +3140,6 @@ static inline void mas_split_final_node(struct maple_subtree_state *mast,
mast->bn->type = maple_arange_64;
else
mast->bn->type = maple_range_64;
- mas->depth = height;
}
/*
* Only a single node is used here, could be root.
@@ -3214,7 +3227,6 @@ static inline void mast_split_data(struct maple_subtree_state *mast,
* mas_push_data() - Instead of splitting a node, it is beneficial to push the
* data to the right or left node if there is room.
* @mas: The maple state
- * @height: The current height of the maple state
* @mast: The maple subtree state
* @left: Push left or not.
*
@@ -3222,8 +3234,8 @@ static inline void mast_split_data(struct maple_subtree_state *mast,
*
* Return: True if pushed, false otherwise.
*/
-static inline bool mas_push_data(struct ma_state *mas, int height,
- struct maple_subtree_state *mast, bool left)
+static inline bool mas_push_data(struct ma_state *mas,
+ struct maple_subtree_state *mast, bool left)
{
unsigned char slot_total = mast->bn->b_end;
unsigned char end, space, split;
@@ -3280,7 +3292,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
mast_split_data(mast, mas, split);
mast_fill_bnode(mast, mas, 2);
- mas_split_final_node(mast, mas, height + 1);
+ mas_split_final_node(mast, mas);
return true;
}
@@ -3293,6 +3305,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
+ unsigned int orig_height = mas_mt_height(mas);
unsigned char mid_split, split = 0;
struct maple_enode *old;
@@ -3319,7 +3332,6 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(__func__, mas);
- mas->depth = mas_mt_height(mas);
mast.l = &l_mas;
mast.r = &r_mas;
@@ -3327,9 +3339,9 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mast.orig_r = &prev_r_mas;
mast.bn = b_node;
- while (height++ <= mas->depth) {
+ while (height++ <= orig_height) {
if (mt_slots[b_node->type] > b_node->b_end) {
- mas_split_final_node(&mast, mas, height);
+ mas_split_final_node(&mast, mas);
break;
}
@@ -3344,11 +3356,15 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
* is a significant savings.
*/
/* Try to push left. */
- if (mas_push_data(mas, height, &mast, true))
+ if (mas_push_data(mas, &mast, true)) {
+ height++;
break;
+ }
/* Try to push right. */
- if (mas_push_data(mas, height, &mast, false))
+ if (mas_push_data(mas, &mast, false)) {
+ height++;
break;
+ }
split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
@@ -3365,7 +3381,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
/* Set the original node as dead */
old = mas->node;
mas->node = l_mas.node;
- mas_wmb_replace(mas, old);
+ mas_wmb_replace(mas, old, height);
mtree_range_walk(mas);
return;
}
@@ -3424,8 +3440,7 @@ static inline void mas_root_expand(struct ma_state *mas, void *entry)
if (mas->last != ULONG_MAX)
pivots[++slot] = ULONG_MAX;
- mas->depth = 1;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 1);
ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
@@ -3532,6 +3547,16 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
if (ma_is_leaf(wr_mas->type))
return true;
+ if (mas->end < mt_slots[wr_mas->type] - 1)
+ wr_mas->vacant_height = mas->depth + 1;
+
+ if (ma_is_root(mas_mn(mas))) {
+ /* a root needs more than 2 entries, plus 1 for the insert, to be sufficient */
+ if (mas->end > 2)
+ wr_mas->sufficient_height = 1;
+ } else if (mas->end > mt_min_slots[wr_mas->type] + 1)
+ wr_mas->sufficient_height = mas->depth + 1;
+
mas_wr_walk_traverse(wr_mas);
}
@@ -3669,8 +3694,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
if (!entry) {
- mas->depth = 0;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 0);
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->status = ma_start;
goto done;
@@ -3684,8 +3708,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
- mas->depth = 1;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 1);
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
done:
@@ -3804,6 +3827,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
struct maple_node reuse, *newnode;
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
+ unsigned char height = mas_mt_height(mas);
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
@@ -3860,7 +3884,7 @@ done:
struct maple_enode *old_enode = mas->node;
mas->node = mt_mk_node(newnode, wr_mas->type);
- mas_replace_node(mas, old_enode);
+ mas_replace_node(mas, old_enode, height);
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
@@ -4059,15 +4083,6 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
unsigned char new_end = mas_wr_new_end(wr_mas);
switch (mas->store_type) {
- case wr_invalid:
- MT_BUG_ON(mas->tree, 1);
- return;
- case wr_new_root:
- mas_new_root(mas, wr_mas->entry);
- break;
- case wr_store_root:
- mas_store_root(mas, wr_mas->entry);
- break;
case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
@@ -4089,6 +4104,14 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
case wr_rebalance:
mas_wr_bnode(wr_mas);
break;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
+ mas_store_root(mas, wr_mas->entry);
+ break;
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
}
return;
@@ -4140,18 +4163,41 @@ set_content:
/**
* mas_prealloc_calc() - Calculate number of nodes needed for a
* given store operation
- * @mas: The maple state
+ * @wr_mas: The maple write state
* @entry: The entry to store into the tree
*
* Return: Number of nodes required for preallocation.
*/
-static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
+static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
{
- int ret = mas_mt_height(mas) * 3 + 1;
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char height = mas_mt_height(mas);
+ int ret = height * 3 + 1;
+ unsigned char delta = height - wr_mas->vacant_height;
switch (mas->store_type) {
- case wr_invalid:
- WARN_ON_ONCE(1);
+ case wr_exact_fit:
+ case wr_append:
+ case wr_slot_store:
+ ret = 0;
+ break;
+ case wr_spanning_store:
+ if (wr_mas->sufficient_height < wr_mas->vacant_height)
+ ret = (height - wr_mas->sufficient_height) * 3 + 1;
+ else
+ ret = delta * 3 + 1;
+ break;
+ case wr_split_store:
+ ret = delta * 2 + 1;
+ break;
+ case wr_rebalance:
+ if (wr_mas->sufficient_height < wr_mas->vacant_height)
+ ret = (height - wr_mas->sufficient_height) * 2 + 1;
+ else
+ ret = delta * 2 + 1;
+ break;
+ case wr_node_store:
+ ret = mt_in_rcu(mas->tree) ? 1 : 0;
break;
case wr_new_root:
ret = 1;
@@ -4164,22 +4210,8 @@ static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
else
ret = 0;
break;
- case wr_spanning_store:
- ret = mas_mt_height(mas) * 3 + 1;
- break;
- case wr_split_store:
- ret = mas_mt_height(mas) * 2 + 1;
- break;
- case wr_rebalance:
- ret = mas_mt_height(mas) * 2 - 1;
- break;
- case wr_node_store:
- ret = mt_in_rcu(mas->tree) ? 1 : 0;
- break;
- case wr_append:
- case wr_exact_fit:
- case wr_slot_store:
- ret = 0;
+ case wr_invalid:
+ WARN_ON_ONCE(1);
}
return ret;
@@ -4243,16 +4275,15 @@ static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
*/
static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
{
- struct ma_state *mas = wr_mas->mas;
int request;
mas_wr_prealloc_setup(wr_mas);
- mas->store_type = mas_wr_store_type(wr_mas);
- request = mas_prealloc_calc(mas, entry);
+ wr_mas->mas->store_type = mas_wr_store_type(wr_mas);
+ request = mas_prealloc_calc(wr_mas, entry);
if (!request)
return;
- mas_node_count(mas, request);
+ mas_node_count(wr_mas->mas, request);
}
/**
@@ -5397,7 +5428,7 @@ void *mas_store(struct ma_state *mas, void *entry)
return wr_mas.content;
}
- request = mas_prealloc_calc(mas, entry);
+ request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
goto store;
@@ -5494,7 +5525,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
- request = mas_prealloc_calc(mas, entry);
+ request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
return ret;
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index fe6705cfd780..9b757a117f09 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -117,7 +117,7 @@ int parse_OID(const void *data, size_t datasize, enum OID *oid)
EXPORT_SYMBOL_GPL(parse_OID);
/*
- * sprint_OID - Print an Object Identifier into a buffer
+ * sprint_oid - Print an Object Identifier into a buffer
* @data: The encoded OID to print
* @datasize: The size of the encoded OID
* @buffer: The buffer to render into
@@ -173,26 +173,3 @@ bad:
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(sprint_oid);
-
-/**
- * sprint_OID - Print an Object Identifier into a buffer
- * @oid: The OID to print
- * @buffer: The buffer to render into
- * @bufsize: The size of the buffer
- *
- * The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned.
- */
-int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
-{
- int ret;
-
- BUG_ON(oid >= OID__NR);
-
- ret = sprint_oid(oid_data + oid_index[oid],
- oid_index[oid + 1] - oid_index[oid],
- buffer, bufsize);
- BUG_ON(ret == -EBADMSG);
- return ret;
-}
-EXPORT_SYMBOL_GPL(sprint_OID);
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index 6264e2013f25..b45ceb725780 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -728,6 +728,9 @@ pldm_send_package_data(struct pldmfw_priv *data)
struct pldmfw_record *record = data->matching_record;
const struct pldmfw_ops *ops = data->context->ops;
+ if (!ops->send_package_data)
+ return 0;
+
return ops->send_package_data(data->context, record->package_data,
record->package_data_len);
}
@@ -755,6 +758,9 @@ pldm_send_component_tables(struct pldmfw_priv *data)
if (!test_bit(index, bitmap))
continue;
+ if (!data->context->ops->send_component_table)
+ continue;
+
/* determine whether this is the start, middle, end, or both
* the start and end of the component tables
*/
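
With these two checks an ops implementation may leave both callbacks unset; a sketch (the other required pldmfw_ops callbacks are elided):

	static const struct pldmfw_ops my_ops = {
		/* ... other callbacks ... */
		.send_package_data = NULL,	/* now optional: skipped */
		.send_component_table = NULL,	/* now optional: skipped */
	};
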
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index cd2e88ee1f14..dfd3f800ac9b 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -28,10 +28,8 @@ EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
-#ifdef CONFIG_AS_AVX512
&raid6_avx512x2,
&raid6_avx512x1,
-#endif
&raid6_avx2x2,
&raid6_avx2x1,
&raid6_sse2x2,
@@ -42,11 +40,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
-#ifdef CONFIG_AS_AVX512
&raid6_avx512x4,
&raid6_avx512x2,
&raid6_avx512x1,
-#endif
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
@@ -96,9 +92,7 @@ EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
-#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
-#endif
&raid6_recov_avx2,
&raid6_recov_ssse3,
#endif
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index 9c3e822e1adf..009bd0adeebf 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -17,8 +17,6 @@
*
*/
-#ifdef CONFIG_AS_AVX512
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -560,5 +558,3 @@ const struct raid6_calls raid6_avx512x4 = {
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif
-
-#endif /* CONFIG_AS_AVX512 */
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
index fd9e15bf3f30..310c715db313 100644
--- a/lib/raid6/recov_avx512.c
+++ b/lib/raid6/recov_avx512.c
@@ -6,8 +6,6 @@
* Author: Megha Dey <megha.dey@linux.intel.com>
*/
-#ifdef CONFIG_AS_AVX512
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -377,7 +375,3 @@ const struct raid6_recov_calls raid6_recov_avx512 = {
#endif
.priority = 3,
};
-
-#else
-#warning "your version of binutils lacks AVX512 support"
-#endif
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 2abe0076a636..8f2dd2210ba8 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -54,9 +54,6 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += -DCONFIG_X86
- CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >/dev/null 2>&1 && \
- rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index ce945c17980b..859c251b23ce 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -33,44 +33,73 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
int interval = READ_ONCE(rs->interval);
int burst = READ_ONCE(rs->burst);
unsigned long flags;
- int ret;
+ int ret = 0;
- if (!interval)
- return 1;
+ /*
+ * A zero interval means "never limit"; otherwise, a non-positive
+ * burst means "always limit".
+ */
+ if (interval <= 0 || burst <= 0) {
+ WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
+ ret = interval == 0 || burst > 0;
+ if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
+ !raw_spin_trylock_irqsave(&rs->lock, flags))
+ goto nolock_ret;
+
+ /* Force re-initialization once re-enabled. */
+ rs->flags &= ~RATELIMIT_INITIALIZED;
+ goto unlock_ret;
+ }
/*
- * If we contend on this state's lock then almost
- * by definition we are too busy to print a message,
- * in addition to the one that will be printed by
- * the entity that is holding the lock already:
+ * If we contend on this state's lock, just check whether
+ * the current burst has been used up. This may yield a
+ * false positive when we are past the interval and the
+ * current lock owner is just about to reset it.
*/
- if (!raw_spin_trylock_irqsave(&rs->lock, flags))
- return 0;
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
+ if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
+ atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
+ ret = 1;
+ goto nolock_ret;
+ }
- if (!rs->begin)
+ if (!(rs->flags & RATELIMIT_INITIALIZED)) {
rs->begin = jiffies;
+ rs->flags |= RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
+ }
if (time_is_before_jiffies(rs->begin + interval)) {
- if (rs->missed) {
- if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ int m;
+
+ /*
+ * Reset rs_n_left ASAP to reduce false positives
+ * in parallel calls, see above.
+ */
+ atomic_set(&rs->rs_n_left, rs->burst);
+ rs->begin = jiffies;
+
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ m = ratelimit_state_reset_miss(rs);
+ if (m) {
printk_deferred(KERN_WARNING
- "%s: %d callbacks suppressed\n",
- func, rs->missed);
- rs->missed = 0;
+ "%s: %d callbacks suppressed\n", func, m);
}
}
- rs->begin = jiffies;
- rs->printed = 0;
}
- if (burst && burst > rs->printed) {
- rs->printed++;
+
+ /* Note that the burst might be taken by a parallel call. */
+ if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
ret = 1;
- } else {
- rs->missed++;
- ret = 0;
- }
+
+unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
+nolock_ret:
+ if (!ret)
+ ratelimit_state_inc_miss(rs);
+
return ret;
}
EXPORT_SYMBOL(___ratelimit);
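
The caller-side contract is unchanged by this rework; a minimal sketch using the existing DEFINE_RATELIMIT_STATE()/__ratelimit() interfaces (the state name is made up):

	/* Allow at most 10 messages per 5-second interval. */
	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	if (__ratelimit(&my_rs))
		pr_warn("event\n");
	/* Suppressed calls are counted via ratelimit_state_inc_miss()
	 * and reported once the interval turns over. */
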
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 989c2d615f92..5114eda6309c 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -297,9 +297,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N S --> N sl
* / \ \
- * sl sr S
+ * sl Sr S
* \
- * sr
+ * Sr
*
* Note: p might be red, and then both
* p and sl are red after rotation(which
@@ -312,9 +312,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N sl --> P S
* \ / \
- * S N sr
+ * S N Sr
* \
- * sr
+ * Sr
*/
tmp1 = tmp2->rb_right;
WRITE_ONCE(sibling->rb_left, tmp1);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b58d5ef1a34b..7582dfab7fe3 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -14,29 +14,6 @@
#include <linux/folio_queue.h>
/**
- * sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * Usually the next entry will be @sg@ + 1, but if this sg element is part
- * of a chained scatterlist, it could jump to the start of a new
- * scatterlist array.
- *
- **/
-struct scatterlist *sg_next(struct scatterlist *sg)
-{
- if (sg_is_last(sg))
- return NULL;
-
- sg++;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
-
- return sg;
-}
-EXPORT_SYMBOL(sg_next);
-
-/**
* sg_nents - return total count of entries in scatterlist
* @sg: The scatterlist
*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 91fa37b5c510..ffb8ead6d4cd 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -138,6 +138,25 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
}
EXPORT_SYMBOL(string_get_size);
+int parse_int_array(const char *buf, size_t count, int **array)
+{
+ int *ints, nints;
+
+ get_options(buf, 0, &nints);
+ if (!nints)
+ return -ENOENT;
+
+ ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
+ if (!ints)
+ return -ENOMEM;
+
+ get_options(buf, nints + 1, ints);
+ *array = ints;
+
+ return 0;
+}
+EXPORT_SYMBOL(parse_int_array);
+
/**
* parse_int_array_user - Split string into a sequence of integers
* @from: The user space buffer to read from
@@ -153,30 +172,14 @@ EXPORT_SYMBOL(string_get_size);
*/
int parse_int_array_user(const char __user *from, size_t count, int **array)
{
- int *ints, nints;
char *buf;
- int ret = 0;
+ int ret;
buf = memdup_user_nul(from, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
- get_options(buf, 0, &nints);
- if (!nints) {
- ret = -ENOENT;
- goto free_buf;
- }
-
- ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
- if (!ints) {
- ret = -ENOMEM;
- goto free_buf;
- }
-
- get_options(buf, nints + 1, ints);
- *array = ints;
-
-free_buf:
+ ret = parse_int_array(buf, count, array);
kfree(buf);
return ret;
}
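
A sketch of the newly exported kernel-side helper (per get_options(), element 0 of the returned array holds the count; values follow from element 1):

	int *vals, i, ret;

	ret = parse_int_array("1,2,3", strlen("1,2,3"), &vals);
	if (ret)
		return ret;
	for (i = 1; i <= vals[0]; i++)	/* vals[0] == 3 */
		pr_info("%d\n", vals[i]);
	kfree(vals);
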
diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile
index 1c3f82ad8bb2..399cae880e1d 100644
--- a/lib/test_fortify/Makefile
+++ b/lib/test_fortify/Makefile
@@ -18,10 +18,7 @@ quiet_cmd_gen_fortify_log = CAT $@
$(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE
$(call if_changed,gen_fortify_log)
-# GCC<=7 does not always produce *.d files.
-# Run the tests only for GCC>=8 or Clang.
-always-$(call gcc-min-version, 80000) += test_fortify.log
-always-$(CONFIG_CC_IS_CLANG) += test_fortify.log
+always-y += test_fortify.log
# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
# Pass CFLAGS_KASAN to avoid warnings.
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 064ed0fce75a..f0dd092860ea 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -28,14 +28,20 @@
#define TEST_START_NUM_THREADS 50
#define TEST_START_DRIVER "test_module"
-#define TEST_START_TEST_FS "xfs"
#define TEST_START_TEST_CASE TEST_KMOD_DRIVER
-
static bool force_init_test = false;
-module_param(force_init_test, bool_enable_only, 0644);
+module_param(force_init_test, bool_enable_only, 0444);
MODULE_PARM_DESC(force_init_test,
"Force kicking a test immediately after driver loads");
+static char *start_driver;
+module_param(start_driver, charp, 0444);
+MODULE_PARM_DESC(start_driver,
+ "Module/driver to use for the testing after driver loads");
+static char *start_test_fs;
+module_param(start_test_fs, charp, 0444);
+MODULE_PARM_DESC(start_test_fs,
+ "File system to use for the testing after driver loads");
/*
* For device allocation / registration
@@ -508,6 +514,11 @@ static int __trigger_config_run(struct kmod_test_device *test_dev)
case TEST_KMOD_DRIVER:
return run_test_driver(test_dev);
case TEST_KMOD_FS_TYPE:
+ if (!config->test_fs) {
+ dev_warn(test_dev->dev,
+ "No fs type specified, can't run the test\n");
+ return -EINVAL;
+ }
return run_test_fs_type(test_dev);
default:
dev_warn(test_dev->dev,
@@ -721,26 +732,20 @@ static ssize_t config_test_fs_show(struct device *dev,
static DEVICE_ATTR_RW(config_test_fs);
static int trigger_config_run_type(struct kmod_test_device *test_dev,
- enum kmod_test_case test_case,
- const char *test_str)
+ enum kmod_test_case test_case)
{
- int copied = 0;
struct test_config *config = &test_dev->config;
mutex_lock(&test_dev->config_mutex);
switch (test_case) {
case TEST_KMOD_DRIVER:
- kfree_const(config->test_driver);
- config->test_driver = NULL;
- copied = config_copy_test_driver_name(config, test_str,
- strlen(test_str));
break;
case TEST_KMOD_FS_TYPE:
- kfree_const(config->test_fs);
- config->test_fs = NULL;
- copied = config_copy_test_fs(config, test_str,
- strlen(test_str));
+ if (!config->test_fs) {
+ mutex_unlock(&test_dev->config_mutex);
+ return 0;
+ }
break;
default:
mutex_unlock(&test_dev->config_mutex);
@@ -751,11 +756,6 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
mutex_unlock(&test_dev->config_mutex);
- if (copied <= 0 || copied != strlen(test_str)) {
- test_dev->test_is_oom = true;
- return -ENOMEM;
- }
-
test_dev->test_is_oom = false;
return trigger_config_run(test_dev);
@@ -800,19 +800,24 @@ static unsigned int kmod_init_test_thread_limit(void)
static int __kmod_config_init(struct kmod_test_device *test_dev)
{
struct test_config *config = &test_dev->config;
+ const char *test_start_driver = start_driver ? start_driver :
+ TEST_START_DRIVER;
int ret = -ENOMEM, copied;
__kmod_config_free(config);
- copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
- strlen(TEST_START_DRIVER));
- if (copied != strlen(TEST_START_DRIVER))
+ copied = config_copy_test_driver_name(config, test_start_driver,
+ strlen(test_start_driver));
+ if (copied != strlen(test_start_driver))
goto err_out;
- copied = config_copy_test_fs(config, TEST_START_TEST_FS,
- strlen(TEST_START_TEST_FS));
- if (copied != strlen(TEST_START_TEST_FS))
- goto err_out;
+
+ if (start_test_fs) {
+ copied = config_copy_test_fs(config, start_test_fs,
+ strlen(start_test_fs));
+ if (copied != strlen(start_test_fs))
+ goto err_out;
+ }
config->num_threads = kmod_init_test_thread_limit();
config->test_result = 0;
@@ -1178,12 +1183,11 @@ static int __init test_kmod_init(void)
* lowering the init level for more fun.
*/
if (force_init_test) {
- ret = trigger_config_run_type(test_dev,
- TEST_KMOD_DRIVER, "tun");
+ ret = trigger_config_run_type(test_dev, TEST_KMOD_DRIVER);
if (WARN_ON(ret))
return ret;
- ret = trigger_config_run_type(test_dev,
- TEST_KMOD_FS_TYPE, "btrfs");
+
+ ret = trigger_config_run_type(test_dev, TEST_KMOD_FS_TYPE);
if (WARN_ON(ret))
return ret;
}
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 4249e0cc8aaf..c02aa9c868f2 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -30,15 +30,17 @@ static int i_zero;
static int i_one_hundred = 100;
static int match_int_ok = 1;
+enum {
+ TEST_H_SETUP_NODE,
+ TEST_H_MNT,
+ TEST_H_MNTERROR,
+ TEST_H_EMPTY_ADD,
+ TEST_H_EMPTY,
+ TEST_H_U8,
+ TEST_H_SIZE /* Always at the end */
+};
-static struct {
- struct ctl_table_header *test_h_setup_node;
- struct ctl_table_header *test_h_mnt;
- struct ctl_table_header *test_h_mnterror;
- struct ctl_table_header *empty_add;
- struct ctl_table_header *empty;
-} sysctl_test_headers;
-
+static struct ctl_table_header *ctl_headers[TEST_H_SIZE] = {};
struct test_sysctl_data {
int int_0001;
int int_0002;
@@ -167,8 +169,8 @@ static int test_sysctl_setup_node_tests(void)
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
- sysctl_test_headers.test_h_setup_node = register_sysctl("debug/test_sysctl", test_table);
- if (!sysctl_test_headers.test_h_setup_node) {
+ ctl_headers[TEST_H_SETUP_NODE] = register_sysctl("debug/test_sysctl", test_table);
+ if (!ctl_headers[TEST_H_SETUP_NODE]) {
kfree(test_data.bitmap_0001);
return -ENOMEM;
}
@@ -202,12 +204,12 @@ static int test_sysctl_run_unregister_nested(void)
static int test_sysctl_run_register_mount_point(void)
{
- sysctl_test_headers.test_h_mnt
+ ctl_headers[TEST_H_MNT]
= register_sysctl_mount_point("debug/test_sysctl/mnt");
- if (!sysctl_test_headers.test_h_mnt)
+ if (!ctl_headers[TEST_H_MNT])
return -ENOMEM;
- sysctl_test_headers.test_h_mnterror
+ ctl_headers[TEST_H_MNTERROR]
= register_sysctl("debug/test_sysctl/mnt/mnt_error",
test_table_unregister);
/*
@@ -225,39 +227,94 @@ static const struct ctl_table test_table_empty[] = { };
static int test_sysctl_run_register_empty(void)
{
/* Test that an empty dir can be created */
- sysctl_test_headers.empty_add
+ ctl_headers[TEST_H_EMPTY_ADD]
= register_sysctl("debug/test_sysctl/empty_add", test_table_empty);
- if (!sysctl_test_headers.empty_add)
+ if (!ctl_headers[TEST_H_EMPTY_ADD])
return -ENOMEM;
/* Test that register on top of an empty dir works */
- sysctl_test_headers.empty
+ ctl_headers[TEST_H_EMPTY]
= register_sysctl("debug/test_sysctl/empty_add/empty", test_table_empty);
- if (!sysctl_test_headers.empty)
+ if (!ctl_headers[TEST_H_EMPTY])
return -ENOMEM;
return 0;
}
-static int __init test_sysctl_init(void)
+static const struct ctl_table table_u8_over[] = {
+ {
+ .procname = "u8_over",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_FOUR,
+ .extra2 = SYSCTL_ONE_THOUSAND,
+ },
+};
+
+static const struct ctl_table table_u8_under[] = {
+ {
+ .procname = "u8_under",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_NEG_ONE,
+ .extra2 = SYSCTL_ONE_HUNDRED,
+ },
+};
+
+static const struct ctl_table table_u8_valid[] = {
+ {
+ .procname = "u8_valid",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO_HUNDRED,
+ },
+};
+
+static int test_sysctl_register_u8_extra(void)
{
- int err;
+ /* should fail because it's over */
+ ctl_headers[TEST_H_U8]
+ = register_sysctl("debug/test_sysctl", table_u8_over);
+ if (ctl_headers[TEST_H_U8])
+ return -ENOMEM;
+
+ /* should fail because it's under */
+ ctl_headers[TEST_H_U8]
+ = register_sysctl("debug/test_sysctl", table_u8_under);
+ if (ctl_headers[TEST_H_U8])
+ return -ENOMEM;
- err = test_sysctl_setup_node_tests();
- if (err)
- goto out;
+ /* should not fail because it's valid */
+ ctl_headers[TEST_H_U8]
+ = register_sysctl("debug/test_sysctl", table_u8_valid);
+ if (!ctl_headers[TEST_H_U8])
+ return -ENOMEM;
- err = test_sysctl_run_unregister_nested();
- if (err)
- goto out;
+ return 0;
+}
- err = test_sysctl_run_register_mount_point();
- if (err)
- goto out;
+static int __init test_sysctl_init(void)
+{
+ int err = 0;
+
+ int (*func_array[])(void) = {
+ test_sysctl_setup_node_tests,
+ test_sysctl_run_unregister_nested,
+ test_sysctl_run_register_mount_point,
+ test_sysctl_run_register_empty,
+ test_sysctl_register_u8_extra
+ };
- err = test_sysctl_run_register_empty();
+ for (int i = 0; !err && i < ARRAY_SIZE(func_array); i++)
+ err = func_array[i]();
-out:
return err;
}
module_init(test_sysctl_init);
@@ -265,16 +322,10 @@ module_init(test_sysctl_init);
static void __exit test_sysctl_exit(void)
{
kfree(test_data.bitmap_0001);
- if (sysctl_test_headers.test_h_setup_node)
- unregister_sysctl_table(sysctl_test_headers.test_h_setup_node);
- if (sysctl_test_headers.test_h_mnt)
- unregister_sysctl_table(sysctl_test_headers.test_h_mnt);
- if (sysctl_test_headers.test_h_mnterror)
- unregister_sysctl_table(sysctl_test_headers.test_h_mnterror);
- if (sysctl_test_headers.empty)
- unregister_sysctl_table(sysctl_test_headers.empty);
- if (sysctl_test_headers.empty_add)
- unregister_sysctl_table(sysctl_test_headers.empty_add);
+ for (int i = 0; i < TEST_H_SIZE; i++) {
+ if (ctl_headers[i])
+ unregister_sysctl_table(ctl_headers[i]);
+ }
}
module_exit(test_sysctl_exit);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index f585949ff696..1b0b59549aaf 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -13,9 +13,9 @@
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/delay.h>
-#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
+#include <linux/srcu.h>
#include <linux/slab.h>
#define __param(type, name, init, msg) \
@@ -58,10 +58,9 @@ __param(int, run_test_mask, INT_MAX,
);
/*
- * Read write semaphore for synchronization of setup
- * phase that is done in main thread and workers.
+ * This is for synchronization of the setup phase.
*/
-static DECLARE_RWSEM(prepare_for_test_rwsem);
+DEFINE_STATIC_SRCU(prepare_for_test_srcu);
/*
* Completion tracking for worker threads.
@@ -458,7 +457,7 @@ static int test_func(void *private)
/*
* Block until initialization is done.
*/
- down_read(&prepare_for_test_rwsem);
+ synchronize_srcu(&prepare_for_test_srcu);
t->start = get_cycles();
for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
@@ -487,8 +486,6 @@ static int test_func(void *private)
t->data[index].time = delta;
}
t->stop = get_cycles();
-
- up_read(&prepare_for_test_rwsem);
test_report_one_done();
/*
@@ -526,7 +523,7 @@ init_test_configuration(void)
static void do_concurrent_test(void)
{
- int i, ret;
+ int i, ret, idx;
/*
* Set some basic configurations plus sanity check.
@@ -538,7 +535,7 @@ static void do_concurrent_test(void)
/*
* Put on hold all workers.
*/
- down_write(&prepare_for_test_rwsem);
+ idx = srcu_read_lock(&prepare_for_test_srcu);
for (i = 0; i < nr_threads; i++) {
struct test_driver *t = &tdriver[i];
@@ -555,7 +552,7 @@ static void do_concurrent_test(void)
/*
* Now let the workers do their job.
*/
- up_write(&prepare_for_test_rwsem);
+ srcu_read_unlock(&prepare_for_test_srcu, idx);
/*
* Sleep quiet until all workers are done with 1 second
@@ -594,10 +591,11 @@ static void do_concurrent_test(void)
kvfree(tdriver);
}
-static int vmalloc_test_init(void)
+static int __init vmalloc_test_init(void)
{
do_concurrent_test();
- return -EAGAIN; /* Fail will directly unload the module */
+ /* Failing here directly unloads the module */
+ return IS_BUILTIN(CONFIG_TEST_VMALLOC) ? 0 : -EAGAIN;
}
module_init(vmalloc_test_init)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 080a39d22e73..5ca0aefee9aa 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1040,6 +1040,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
unsigned int i, id;
unsigned long index;
void *entry;
+ int ret;
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
&next, GFP_KERNEL) != 0);
@@ -1059,7 +1060,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
else
entry = xa_mk_index(i - 0x3fff);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
- &next, GFP_KERNEL) != (id == 1));
+ &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_mk_index(id) != entry);
}
@@ -1072,7 +1073,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
xa_limit_32b, &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, id != UINT_MAX);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
- xa_limit_32b, &next, GFP_KERNEL) != 1);
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, id != base);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
xa_limit_32b, &next, GFP_KERNEL) != 0);
@@ -1080,7 +1081,19 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
xa_for_each(xa, index, entry)
xa_erase_index(xa, index);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ /* check wrap-around return of __xa_alloc_cyclic() */
+ next = UINT_MAX;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ xa_lock(xa);
+ ret = __xa_alloc_cyclic(xa, &id, xa_mk_index(base), xa_limit_32b,
+ &next, GFP_KERNEL);
+ xa_unlock(xa);
+ XA_BUG_ON(xa, ret != 1);
+ xa_for_each(xa, index, entry)
+ xa_erase_index(xa, index);
XA_BUG_ON(xa, !xa_empty(xa));
}
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 5a4794c1826e..56d645014482 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+obj-$(CONFIG_RANDSTRUCT_KUNIT_TEST) += randstruct_kunit.o
obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
diff --git a/lib/tests/crc_kunit.c b/lib/tests/crc_kunit.c
index 585c48b65cef..064c2d581557 100644
--- a/lib/tests/crc_kunit.c
+++ b/lib/tests/crc_kunit.c
@@ -391,17 +391,11 @@ static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len)
return crc32c(crc, p, len);
}
-static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
-{
- return crc32c_combine(crc1, crc2, len2);
-}
-
static const struct crc_variant crc_variant_crc32c = {
.bits = 32,
.le = true,
.poly = 0x82f63b78,
.func = crc32c_wrapper,
- .combine_func = crc32c_combine_wrapper,
};
static void crc32c_test(struct kunit *test)
diff --git a/lib/tests/overflow_kunit.c b/lib/tests/overflow_kunit.c
index 894691b4411a..19cb03b25dc5 100644
--- a/lib/tests/overflow_kunit.c
+++ b/lib/tests/overflow_kunit.c
@@ -1210,6 +1210,10 @@ static void DEFINE_FLEX_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, __struct_size(empty->array), 0);
KUNIT_EXPECT_EQ(test, __member_size(empty->array), 0);
+ KUNIT_EXPECT_EQ(test, STACK_FLEX_ARRAY_SIZE(two, array), 2);
+ KUNIT_EXPECT_EQ(test, STACK_FLEX_ARRAY_SIZE(eight, array), 8);
+ KUNIT_EXPECT_EQ(test, STACK_FLEX_ARRAY_SIZE(empty, array), 0);
+
/* If __counted_by is not being used, array size will have the on-stack size. */
if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
array_size_override = 2 * sizeof(s16);
diff --git a/lib/tests/printf_kunit.c b/lib/tests/printf_kunit.c
index 2c9f6170bacd..bc54cca2d7a6 100644
--- a/lib/tests/printf_kunit.c
+++ b/lib/tests/printf_kunit.c
@@ -701,21 +701,46 @@ static void fwnode_pointer(struct kunit *kunittest)
software_node_unregister_node_group(group);
}
+struct fourcc_struct {
+ u32 code;
+ const char *str;
+};
+
+static void fourcc_pointer_test(struct kunit *kunittest, const struct fourcc_struct *fc,
+ size_t n, const char *fmt)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ test(fc[i].str, fmt, &fc[i].code);
+}
+
static void fourcc_pointer(struct kunit *kunittest)
{
- struct {
- u32 code;
- char *str;
- } const try[] = {
+ static const struct fourcc_struct try_cc[] = {
{ 0x3231564e, "NV12 little-endian (0x3231564e)", },
{ 0xb231564e, "NV12 big-endian (0xb231564e)", },
{ 0x10111213, ".... little-endian (0x10111213)", },
{ 0x20303159, "Y10 little-endian (0x20303159)", },
};
- unsigned int i;
+ static const struct fourcc_struct try_ch[] = {
+ { 0x41424344, "ABCD (0x41424344)", },
+ };
+ static const struct fourcc_struct try_chR[] = {
+ { 0x41424344, "DCBA (0x44434241)", },
+ };
+ static const struct fourcc_struct try_cl[] = {
+ { (__force u32)cpu_to_le32(0x41424344), "ABCD (0x41424344)", },
+ };
+ static const struct fourcc_struct try_cb[] = {
+ { (__force u32)cpu_to_be32(0x41424344), "ABCD (0x41424344)", },
+ };
- for (i = 0; i < ARRAY_SIZE(try); i++)
- test(try[i].str, "%p4cc", &try[i].code);
+ fourcc_pointer_test(kunittest, try_cc, ARRAY_SIZE(try_cc), "%p4cc");
+ fourcc_pointer_test(kunittest, try_ch, ARRAY_SIZE(try_ch), "%p4ch");
+ fourcc_pointer_test(kunittest, try_chR, ARRAY_SIZE(try_chR), "%p4chR");
+ fourcc_pointer_test(kunittest, try_cl, ARRAY_SIZE(try_cl), "%p4cl");
+ fourcc_pointer_test(kunittest, try_cb, ARRAY_SIZE(try_cb), "%p4cb");
}
static void
diff --git a/lib/tests/randstruct_kunit.c b/lib/tests/randstruct_kunit.c
new file mode 100644
index 000000000000..f3a2d63c4cfb
--- /dev/null
+++ b/lib/tests/randstruct_kunit.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test cases for struct randomization, i.e. CONFIG_RANDSTRUCT=y.
+ *
+ * For example, see:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run randstruct [--raw_output] \
+ * [--make_option LLVM=1] \
+ * --kconfig_add CONFIG_RANDSTRUCT_FULL=y
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#define DO_MANY_MEMBERS(macro, args...) \
+ macro(a, args) \
+ macro(b, args) \
+ macro(c, args) \
+ macro(d, args) \
+ macro(e, args) \
+ macro(f, args) \
+ macro(g, args) \
+ macro(h, args)
+
+#define do_enum(x, ignored) MEMBER_NAME_ ## x,
+enum randstruct_member_names {
+ DO_MANY_MEMBERS(do_enum)
+ MEMBER_NAME_MAX,
+};
+/* Make sure the macros are working: want 8 test members. */
+_Static_assert(MEMBER_NAME_MAX == 8, "Number of test members changed?!");
+
+/* This is an unsigned long member to match the function pointer size */
+#define unsigned_long_member(x, ignored) unsigned long x;
+struct randstruct_untouched {
+ DO_MANY_MEMBERS(unsigned_long_member)
+};
+
+/* Struct explicitly marked with __randomize_layout. */
+struct randstruct_shuffled {
+ DO_MANY_MEMBERS(unsigned_long_member)
+} __randomize_layout;
+#undef unsigned_long_member
+
+/* Struct implicitly randomized from being all func ptrs. */
+#define func_member(x, ignored) size_t (*x)(int);
+struct randstruct_funcs_untouched {
+ DO_MANY_MEMBERS(func_member)
+} __no_randomize_layout;
+
+struct randstruct_funcs_shuffled {
+ DO_MANY_MEMBERS(func_member)
+};
+
+#define func_body(x, ignored) \
+static noinline size_t func_##x(int arg) \
+{ \
+ return offsetof(struct randstruct_funcs_untouched, x); \
+}
+DO_MANY_MEMBERS(func_body)
+
+/* Various mixed types. */
+#define mixed_members \
+ bool a; \
+ short b; \
+ unsigned int c __aligned(16); \
+ size_t d; \
+ char e; \
+ u64 f; \
+ union { \
+ struct randstruct_shuffled shuffled; \
+ uintptr_t g; \
+ }; \
+ union { \
+ void *ptr; \
+ char h; \
+ };
+
+struct randstruct_mixed_untouched {
+ mixed_members
+};
+
+struct randstruct_mixed_shuffled {
+ mixed_members
+} __randomize_layout;
+#undef mixed_members
+
+struct contains_randstruct_untouched {
+ int before;
+ struct randstruct_untouched untouched;
+ int after;
+};
+
+struct contains_randstruct_shuffled {
+ int before;
+ struct randstruct_shuffled shuffled;
+ int after;
+};
+
+struct contains_func_untouched {
+ struct randstruct_funcs_shuffled inner;
+ DO_MANY_MEMBERS(func_member)
+} __no_randomize_layout;
+
+struct contains_func_shuffled {
+ struct randstruct_funcs_shuffled inner;
+ DO_MANY_MEMBERS(func_member)
+};
+#undef func_member
+
+#define check_mismatch(x, untouched, shuffled) \
+ if (offsetof(untouched, x) != offsetof(shuffled, x)) \
+ mismatches++; \
+ kunit_info(test, #shuffled "::" #x " @ %zu (vs %zu)\n", \
+ offsetof(shuffled, x), \
+ offsetof(untouched, x)); \
+
+#define check_pair(outcome, untouched, shuffled, checker...) \
+ mismatches = 0; \
+ DO_MANY_MEMBERS(checker, untouched, shuffled) \
+ kunit_info(test, "Differing " #untouched " vs " #shuffled " member positions: %d\n", \
+ mismatches); \
+ KUNIT_##outcome##_MSG(test, mismatches, 0, \
+ #untouched " vs " #shuffled " layouts: unlucky or broken?\n");
+
+static void randstruct_layout_same(struct kunit *test)
+{
+ int mismatches;
+
+ check_pair(EXPECT_EQ, struct randstruct_untouched, struct randstruct_untouched,
+ check_mismatch)
+ check_pair(EXPECT_GT, struct randstruct_untouched, struct randstruct_shuffled,
+ check_mismatch)
+}
+
+static void randstruct_layout_mixed(struct kunit *test)
+{
+ int mismatches;
+
+ check_pair(EXPECT_EQ, struct randstruct_mixed_untouched, struct randstruct_mixed_untouched,
+ check_mismatch)
+ check_pair(EXPECT_GT, struct randstruct_mixed_untouched, struct randstruct_mixed_shuffled,
+ check_mismatch)
+}
+
+static void randstruct_layout_fptr(struct kunit *test)
+{
+ int mismatches;
+
+ check_pair(EXPECT_EQ, struct randstruct_untouched, struct randstruct_untouched,
+ check_mismatch)
+ check_pair(EXPECT_GT, struct randstruct_untouched, struct randstruct_funcs_shuffled,
+ check_mismatch)
+ check_pair(EXPECT_GT, struct randstruct_funcs_untouched, struct randstruct_funcs_shuffled,
+ check_mismatch)
+}
+
+#define check_mismatch_prefixed(x, prefix, untouched, shuffled) \
+ check_mismatch(prefix.x, untouched, shuffled)
+
+static void randstruct_layout_fptr_deep(struct kunit *test)
+{
+ int mismatches;
+
+ if (IS_ENABLED(CONFIG_CC_IS_CLANG))
+ kunit_skip(test, "Clang randstruct misses inner functions: https://github.com/llvm/llvm-project/issues/138355");
+
+ check_pair(EXPECT_EQ, struct contains_func_untouched, struct contains_func_untouched,
+ check_mismatch_prefixed, inner)
+
+ check_pair(EXPECT_GT, struct contains_func_untouched, struct contains_func_shuffled,
+ check_mismatch_prefixed, inner)
+}
+
+#undef check_pair
+#undef check_mismatch
+
+#define check_mismatch(x, ignore) \
+ KUNIT_EXPECT_EQ_MSG(test, untouched->x, shuffled->x, \
+ "Mismatched member value in %s initializer\n", \
+ name);
+
+static void test_check_init(struct kunit *test, const char *name,
+ struct randstruct_untouched *untouched,
+ struct randstruct_shuffled *shuffled)
+{
+ DO_MANY_MEMBERS(check_mismatch)
+}
+
+static void test_check_mixed_init(struct kunit *test, const char *name,
+ struct randstruct_mixed_untouched *untouched,
+ struct randstruct_mixed_shuffled *shuffled)
+{
+ DO_MANY_MEMBERS(check_mismatch)
+}
+#undef check_mismatch
+
+#define check_mismatch(x, ignore) \
+ KUNIT_EXPECT_EQ_MSG(test, untouched->untouched.x, \
+ shuffled->shuffled.x, \
+ "Mismatched member value in %s initializer\n", \
+ name);
+static void test_check_contained_init(struct kunit *test, const char *name,
+ struct contains_randstruct_untouched *untouched,
+ struct contains_randstruct_shuffled *shuffled)
+{
+ DO_MANY_MEMBERS(check_mismatch)
+}
+#undef check_mismatch
+
+#define check_mismatch(x, ignore) \
+ KUNIT_EXPECT_PTR_EQ_MSG(test, untouched->x, shuffled->x, \
+ "Mismatched member value in %s initializer\n", \
+ name);
+
+static void test_check_funcs_init(struct kunit *test, const char *name,
+ struct randstruct_funcs_untouched *untouched,
+ struct randstruct_funcs_shuffled *shuffled)
+{
+ DO_MANY_MEMBERS(check_mismatch)
+}
+#undef check_mismatch
+
+static void randstruct_initializers(struct kunit *test)
+{
+#define init_members \
+ .a = 1, \
+ .b = 3, \
+ .c = 5, \
+ .d = 7, \
+ .e = 11, \
+ .f = 13, \
+ .g = 17, \
+ .h = 19,
+ struct randstruct_untouched untouched = {
+ init_members
+ };
+ struct randstruct_shuffled shuffled = {
+ init_members
+ };
+ struct randstruct_mixed_untouched mixed_untouched = {
+ init_members
+ };
+ struct randstruct_mixed_shuffled mixed_shuffled = {
+ init_members
+ };
+ struct contains_randstruct_untouched contains_untouched = {
+ .untouched = {
+ init_members
+ },
+ };
+ struct contains_randstruct_shuffled contains_shuffled = {
+ .shuffled = {
+ init_members
+ },
+ };
+#define func_member(x, ignored) \
+ .x = func_##x,
+ struct randstruct_funcs_untouched funcs_untouched = {
+ DO_MANY_MEMBERS(func_member)
+ };
+ struct randstruct_funcs_shuffled funcs_shuffled = {
+ DO_MANY_MEMBERS(func_member)
+ };
+
+ test_check_init(test, "named", &untouched, &shuffled);
+ test_check_init(test, "unnamed", &untouched,
+ &(struct randstruct_shuffled){
+ init_members
+ });
+
+ test_check_contained_init(test, "named", &contains_untouched, &contains_shuffled);
+ test_check_contained_init(test, "unnamed", &contains_untouched,
+ &(struct contains_randstruct_shuffled){
+ .shuffled = (struct randstruct_shuffled){
+ init_members
+ },
+ });
+
+ test_check_contained_init(test, "named", &contains_untouched, &contains_shuffled);
+ test_check_contained_init(test, "unnamed copy", &contains_untouched,
+ &(struct contains_randstruct_shuffled){
+ /* full struct copy initializer */
+ .shuffled = shuffled,
+ });
+
+ test_check_mixed_init(test, "named", &mixed_untouched, &mixed_shuffled);
+ test_check_mixed_init(test, "unnamed", &mixed_untouched,
+ &(struct randstruct_mixed_shuffled){
+ init_members
+ });
+
+ test_check_funcs_init(test, "named", &funcs_untouched, &funcs_shuffled);
+ test_check_funcs_init(test, "unnamed", &funcs_untouched,
+ &(struct randstruct_funcs_shuffled){
+ DO_MANY_MEMBERS(func_member)
+ });
+
+#undef func_member
+#undef init_members
+}
+
+static int randstruct_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_RANDSTRUCT))
+ kunit_skip(test, "Not built with CONFIG_RANDSTRUCT=y");
+
+ return 0;
+}
+
+static struct kunit_case randstruct_test_cases[] = {
+ KUNIT_CASE(randstruct_layout_same),
+ KUNIT_CASE(randstruct_layout_mixed),
+ KUNIT_CASE(randstruct_layout_fptr),
+ KUNIT_CASE(randstruct_layout_fptr_deep),
+ KUNIT_CASE(randstruct_initializers),
+ {}
+};
+
+static struct kunit_suite randstruct_test_suite = {
+ .name = "randstruct",
+ .init = randstruct_test_init,
+ .test_cases = randstruct_test_cases,
+};
+
+kunit_test_suites(&randstruct_test_suite);
+
+MODULE_DESCRIPTION("Test cases for struct randomization");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
index 63aa78e6f5c1..ff2784769772 100644
--- a/lib/tests/stackinit_kunit.c
+++ b/lib/tests/stackinit_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test cases for compiler-based stack variable zeroing via
- * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * -ftrivial-auto-var-init={zero,pattern}.
* For example, see:
* "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
* ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
@@ -376,14 +376,6 @@ union test_small_end {
# define USER_PASS XFAIL
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
-#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
-# define USER_PASS WANT_SUCCESS
-# define BYREF_PASS XFAIL
-# define STRONG_PASS XFAIL
-#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
-# define USER_PASS WANT_SUCCESS
-# define BYREF_PASS WANT_SUCCESS
-# define STRONG_PASS XFAIL
#else
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
diff --git a/lib/tests/usercopy_kunit.c b/lib/tests/usercopy_kunit.c
index 77fa00a13df7..80f8abe10968 100644
--- a/lib/tests/usercopy_kunit.c
+++ b/lib/tests/usercopy_kunit.c
@@ -27,6 +27,7 @@
!defined(CONFIG_MICROBLAZE) && \
!defined(CONFIG_NIOS2) && \
!defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SPARC32) && \
!defined(CONFIG_SUPERH))
# define TEST_U64
#endif
diff --git a/lib/ubsan.c b/lib/ubsan.c
index cdc1d31c3821..a6ca235dd714 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -19,13 +19,13 @@
#include "ubsan.h"
-#ifdef CONFIG_UBSAN_TRAP
+#if defined(CONFIG_UBSAN_TRAP) || defined(CONFIG_UBSAN_KVM_EL2)
/*
* Only include matches for UBSAN checks that are actually compiled in.
* The mappings of struct SanitizerKind (the -fsanitize=xxx args) to
* enum SanitizerHandler (the traps) in Clang is in clang/lib/CodeGen/.
*/
-const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+const char *report_ubsan_failure(u32 check_type)
{
switch (check_type) {
#ifdef CONFIG_UBSAN_BOUNDS
@@ -97,7 +97,9 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
}
}
-#else
+#endif
+
+#ifndef CONFIG_UBSAN_TRAP
static const char * const type_check_kinds[] = {
"load of",
"store to",
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 01699852f30c..3d85800757aa 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1793,27 +1793,49 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
char output[sizeof("0123 little-endian (0x01234567)")];
char *p = output;
unsigned int i;
+ bool pixel_fmt = false;
u32 orig, val;
- if (fmt[1] != 'c' || fmt[2] != 'c')
+ if (fmt[1] != 'c')
return error_string(buf, end, "(%p4?)", spec);
if (check_pointer(&buf, end, fourcc, spec))
return buf;
orig = get_unaligned(fourcc);
- val = orig & ~BIT(31);
+ switch (fmt[2]) {
+ case 'h':
+ if (fmt[3] == 'R')
+ orig = swab32(orig);
+ break;
+ case 'l':
+ orig = (__force u32)cpu_to_le32(orig);
+ break;
+ case 'b':
+ orig = (__force u32)cpu_to_be32(orig);
+ break;
+ case 'c':
+ /* Pixel formats are printed LSB-first */
+ pixel_fmt = true;
+ break;
+ default:
+ return error_string(buf, end, "(%p4?)", spec);
+ }
+
+ val = pixel_fmt ? swab32(orig & ~BIT(31)) : orig;
for (i = 0; i < sizeof(u32); i++) {
- unsigned char c = val >> (i * 8);
+ unsigned char c = val >> ((3 - i) * 8);
/* Print non-control ASCII characters as-is, dot otherwise */
*p++ = isascii(c) && isprint(c) ? c : '.';
}
- *p++ = ' ';
- strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
- p += strlen(p);
+ if (pixel_fmt) {
+ *p++ = ' ';
+ strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
+ p += strlen(p);
+ }
*p++ = ' ';
*p++ = '(';
@@ -1981,15 +2003,11 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
if (check_pointer(&buf, end, clk, spec))
return buf;
- switch (fmt[1]) {
- case 'n':
- default:
#ifdef CONFIG_COMMON_CLK
- return string(buf, end, __clk_get_name(clk), spec);
+ return string(buf, end, __clk_get_name(clk), spec);
#else
- return ptr_to_id(buf, end, clk, spec);
+ return ptr_to_id(buf, end, clk, spec);
#endif
- }
}
static
@@ -2374,6 +2392,12 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* read the documentation (path below) first.
* - 'NF' For a netdev_features_t
* - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
+ * - '4c[h[R]lb]' For a generic FourCC code with its raw numerical value. Both
+ * the characters and the value are displayed in big-endian order (the
+ * opposite of the V4L2/DRM '4cc' pixel formats).
+ * The additional specifier selects the endianness used to load the stored
+ * bytes: host byte order, reversed host byte order, little-endian, or
+ * big-endian.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
@@ -2391,8 +2415,6 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
- * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
- * (legacy clock framework) of the clock
* - 'G' For flags to be printed as a collection of symbolic strings that would
* construct the specific value. Supported flags given by option:
* p page flags (see struct page) given as pointer to unsigned long
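
A sketch of the new specifiers (expected output mirrors the printf_kunit cases added above):

	u32 code = 0x41424344;

	pr_info("%p4ch\n", &code);	/* "ABCD (0x41424344)" - host order */
	pr_info("%p4chR\n", &code);	/* "DCBA (0x44434241)" - reversed */
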
diff --git a/lib/xarray.c b/lib/xarray.c
index 9644b18af18d..76dde3a1cacf 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1742,20 +1742,23 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp);
/**
- * __xa_cmpxchg() - Store this entry in the XArray.
+ * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
- * @entry: New entry.
+ * @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* You must already be holding the xa_lock when calling this function.
* It will drop the lock if needed to allocate memory, and then reacquire
* it afterwards.
*
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
- * Return: The old entry at this index or xa_err() if an error happened.
+ * Return: The old value at this index or xa_err() if an error happened.
*/
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)