Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                    |  63
-rw-r--r--  lib/base64.c                         | 189
-rw-r--r--  lib/dynamic_debug.c                  |   1
-rw-r--r--  lib/math/div64.c                     | 185
-rw-r--r--  lib/math/test_mul_u64_u64_div_u64.c  | 191
-rw-r--r--  lib/plist.c                          |   4
-rw-r--r--  lib/ratelimit.c                      |   2
-rw-r--r--  lib/rbtree.c                         |  29
-rw-r--r--  lib/sys_info.c                       | 169
-rw-r--r--  lib/test_kho.c                       | 140
-rw-r--r--  lib/tests/Makefile                   |   1
-rw-r--r--  lib/tests/base64_kunit.c             | 294
-rw-r--r--  lib/usercopy.c                       |   4
-rw-r--r--  lib/xxhash.c                         |  29
-rw-r--r--  lib/xz/xz_dec_bcj.c                  |  95
-rw-r--r--  lib/xz/xz_private.h                  |   4
16 files changed, 927 insertions(+), 473 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c2654075377e..ba36939fda79 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -342,8 +342,7 @@ config DEBUG_INFO_COMPRESSED_ZLIB
depends on $(cc-option,-gz=zlib)
depends on $(ld-option,--compress-debug-sections=zlib)
help
- Compress the debug information using zlib. Requires GCC 5.0+ or Clang
- 5.0+, binutils 2.26+, and zlib.
+ Compress the debug information using zlib.
Users of dpkg-deb via debian/rules may find an increase in
size of their debug .deb packages with this config set, due to the
@@ -493,23 +492,23 @@ config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
depends on CC_IS_GCC
help
- The section mismatch analysis checks if there are illegal
- references from one section to another section.
- During linktime or runtime, some sections are dropped;
- any use of code/data previously in these sections would
- most likely result in an oops.
- In the code, functions and variables are annotated with
- __init,, etc. (see the full list in include/linux/init.h),
- which results in the code/data being placed in specific sections.
+ The section mismatch analysis checks if there are illegal references
+ from one section to another. During linktime or runtime, some
+ sections are dropped; any use of code/data previously in these
+ sections would most likely result in an oops.
+
+ In the code, functions and variables are annotated with __init,
+ __initdata, and so on (see the full list in include/linux/init.h).
+ This directs the toolchain to place code/data in specific sections.
+
The section mismatch analysis is always performed after a full
- kernel build, and enabling this option causes the following
- additional step to occur:
- - Add the option -fno-inline-functions-called-once to gcc commands.
- When inlining a function annotated with __init in a non-init
- function, we would lose the section information and thus
- the analysis would not catch the illegal reference.
- This option tells gcc to inline less (but it does result in
- a larger kernel).
+ kernel build, and enabling this option causes the option
+ -fno-inline-functions-called-once to be added to gcc commands.
+
+ However, when inlining a function annotated with __init in
+ a non-init function, we would lose the section information and thus
+ the analysis would not catch the illegal reference. This option
+ tells gcc to inline less (but it does result in a larger kernel).
config SECTION_MISMATCH_WARN_ONLY
bool "Make section mismatch errors non-fatal"
@@ -1260,12 +1259,13 @@ config DEFAULT_HUNG_TASK_TIMEOUT
Keeping the default should be fine in most cases.
config BOOTPARAM_HUNG_TASK_PANIC
- bool "Panic (Reboot) On Hung Tasks"
+ int "Number of hung tasks to trigger kernel panic"
depends on DETECT_HUNG_TASK
+ default 0
help
- Say Y here to enable the kernel to panic on "hung tasks",
- which are bugs that cause the kernel to leave a task stuck
- in uninterruptible "D" state.
+ When set to a non-zero value, a kernel panic will be triggered
+ if the number of hung tasks found during a single scan reaches
+ this value.
The panic can be used in combination with panic_timeout,
to cause the system to reboot automatically after a
@@ -2817,8 +2817,25 @@ config CMDLINE_KUNIT_TEST
If unsure, say N.
+config BASE64_KUNIT
+ tristate "KUnit test for base64 decoding and encoding" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the base64 unit tests.
+
+ The tests cover the encoding and decoding logic of Base64 functions
+ in the kernel.
+ In addition to correctness checks, simple performance benchmarks
+ for both encoding and decoding are also included.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config BITS_TEST
- tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS
+ tristate "KUnit test for bit functions and macros" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
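To make the DEBUG_SECTION_MISMATCH help text above concrete, here is a minimal, illustrative sketch of the kind of illegal cross-section reference it describes (the widget_* function names are hypothetical and not part of this patch):

    #include <linux/init.h>
    #include <linux/printk.h>

    /* Lives in .init.text; the memory is freed once boot finishes. */
    static int __init widget_setup(void)
    {
        pr_info("widget ready\n");
        return 0;
    }

    /* Ordinary .text; may run at any time after init memory is gone. */
    int widget_reconfigure(void)
    {
        /*
         * Section mismatch: non-init code references an __init function.
         * Run after boot, this call lands in freed memory and will most
         * likely oops.  -fno-inline-functions-called-once keeps the call
         * out of line so the analysis can still see the reference.
         */
        return widget_setup();
    }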
diff --git a/lib/base64.c b/lib/base64.c
index b736a7a431c5..41961a444028 100644
--- a/lib/base64.c
+++ b/lib/base64.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * base64.c - RFC4648-compliant base64 encoding
+ * base64.c - Base64 with support for multiple variants
*
* Copyright (c) 2020 Hannes Reinecke, SUSE
*
* Based on the base64url routines from fs/crypto/fname.c
- * (which are using the URL-safe base64 encoding),
- * modified to use the standard coding table from RFC4648 section 4.
+ * (which are using the URL-safe Base64 encoding),
+ * modified to support multiple Base64 variants.
*/
#include <linux/kernel.h>
@@ -15,89 +15,170 @@
#include <linux/string.h>
#include <linux/base64.h>
-static const char base64_table[65] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+static const char base64_tables[][65] = {
+ [BASE64_STD] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
+ [BASE64_URLSAFE] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
+ [BASE64_IMAP] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,",
+};
+/*
+ * Initialize the base64 reverse mapping for a single character.
+ * This macro maps a character to its corresponding base64 value,
+ * returning -1 if the character is invalid.
+ * char 'A'-'Z' maps to 0-25, 'a'-'z' maps to 26-51, '0'-'9' maps to 52-61,
+ * ch_62 maps to 62, ch_63 maps to 63, and other characters return -1.
+ */
+#define INIT_1(v, ch_62, ch_63) \
+ [v] = (v) >= 'A' && (v) <= 'Z' ? (v) - 'A' \
+ : (v) >= 'a' && (v) <= 'z' ? (v) - 'a' + 26 \
+ : (v) >= '0' && (v) <= '9' ? (v) - '0' + 52 \
+ : (v) == (ch_62) ? 62 : (v) == (ch_63) ? 63 : -1
+
+/*
+ * Recursive macros to generate multiple Base64 reverse mapping table entries.
+ * Each macro generates a sequence of entries in the lookup table:
+ * INIT_2 generates 2 entries, INIT_4 generates 4, INIT_8 generates 8, and so on up to INIT_32.
+ */
+#define INIT_2(v, ...) INIT_1(v, __VA_ARGS__), INIT_1((v) + 1, __VA_ARGS__)
+#define INIT_4(v, ...) INIT_2(v, __VA_ARGS__), INIT_2((v) + 2, __VA_ARGS__)
+#define INIT_8(v, ...) INIT_4(v, __VA_ARGS__), INIT_4((v) + 4, __VA_ARGS__)
+#define INIT_16(v, ...) INIT_8(v, __VA_ARGS__), INIT_8((v) + 8, __VA_ARGS__)
+#define INIT_32(v, ...) INIT_16(v, __VA_ARGS__), INIT_16((v) + 16, __VA_ARGS__)
+
+#define BASE64_REV_INIT(ch_62, ch_63) { \
+ [0 ... 0x1f] = -1, \
+ INIT_32(0x20, ch_62, ch_63), \
+ INIT_32(0x40, ch_62, ch_63), \
+ INIT_32(0x60, ch_62, ch_63), \
+ [0x80 ... 0xff] = -1 }
+
+static const s8 base64_rev_maps[][256] = {
+ [BASE64_STD] = BASE64_REV_INIT('+', '/'),
+ [BASE64_URLSAFE] = BASE64_REV_INIT('-', '_'),
+ [BASE64_IMAP] = BASE64_REV_INIT('+', ',')
+};
+
+#undef BASE64_REV_INIT
+#undef INIT_32
+#undef INIT_16
+#undef INIT_8
+#undef INIT_4
+#undef INIT_2
+#undef INIT_1
/**
- * base64_encode() - base64-encode some binary data
+ * base64_encode() - Base64-encode some binary data
* @src: the binary data to encode
* @srclen: the length of @src in bytes
- * @dst: (output) the base64-encoded string. Not NUL-terminated.
+ * @dst: (output) the Base64-encoded string. Not NUL-terminated.
+ * @padding: whether to append '=' padding characters
+ * @variant: which base64 variant to use
*
- * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified
- * by RFC 4648, including the '='-padding.
+ * Encodes data using the selected Base64 variant.
*
- * Return: the length of the resulting base64-encoded string in bytes.
+ * Return: the length of the resulting Base64-encoded string in bytes.
*/
-int base64_encode(const u8 *src, int srclen, char *dst)
+int base64_encode(const u8 *src, int srclen, char *dst, bool padding, enum base64_variant variant)
{
u32 ac = 0;
- int bits = 0;
- int i;
char *cp = dst;
+ const char *base64_table = base64_tables[variant];
- for (i = 0; i < srclen; i++) {
- ac = (ac << 8) | src[i];
- bits += 8;
- do {
- bits -= 6;
- *cp++ = base64_table[(ac >> bits) & 0x3f];
- } while (bits >= 6);
- }
- if (bits) {
- *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
- bits -= 6;
+ while (srclen >= 3) {
+ ac = src[0] << 16 | src[1] << 8 | src[2];
+ *cp++ = base64_table[ac >> 18];
+ *cp++ = base64_table[(ac >> 12) & 0x3f];
+ *cp++ = base64_table[(ac >> 6) & 0x3f];
+ *cp++ = base64_table[ac & 0x3f];
+
+ src += 3;
+ srclen -= 3;
}
- while (bits < 0) {
- *cp++ = '=';
- bits += 2;
+
+ switch (srclen) {
+ case 2:
+ ac = src[0] << 16 | src[1] << 8;
+ *cp++ = base64_table[ac >> 18];
+ *cp++ = base64_table[(ac >> 12) & 0x3f];
+ *cp++ = base64_table[(ac >> 6) & 0x3f];
+ if (padding)
+ *cp++ = '=';
+ break;
+ case 1:
+ ac = src[0] << 16;
+ *cp++ = base64_table[ac >> 18];
+ *cp++ = base64_table[(ac >> 12) & 0x3f];
+ if (padding) {
+ *cp++ = '=';
+ *cp++ = '=';
+ }
+ break;
}
return cp - dst;
}
EXPORT_SYMBOL_GPL(base64_encode);
/**
- * base64_decode() - base64-decode a string
+ * base64_decode() - Base64-decode a string
* @src: the string to decode. Doesn't need to be NUL-terminated.
* @srclen: the length of @src in bytes
* @dst: (output) the decoded binary data
+ * @padding: whether the input includes '=' padding characters
+ * @variant: which base64 variant to use
*
- * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding"
- * specified by RFC 4648, including the '='-padding.
- *
- * This implementation hasn't been optimized for performance.
+ * Decodes a string using the selected Base64 variant.
*
* Return: the length of the resulting decoded binary data in bytes,
- * or -1 if the string isn't a valid base64 string.
+ * or -1 if the string isn't a valid Base64 string.
*/
-int base64_decode(const char *src, int srclen, u8 *dst)
+int base64_decode(const char *src, int srclen, u8 *dst, bool padding, enum base64_variant variant)
{
- u32 ac = 0;
- int bits = 0;
- int i;
u8 *bp = dst;
+ s8 input[4];
+ s32 val;
+ const u8 *s = (const u8 *)src;
+ const s8 *base64_rev_tables = base64_rev_maps[variant];
- for (i = 0; i < srclen; i++) {
- const char *p = strchr(base64_table, src[i]);
+ while (srclen >= 4) {
+ input[0] = base64_rev_tables[s[0]];
+ input[1] = base64_rev_tables[s[1]];
+ input[2] = base64_rev_tables[s[2]];
+ input[3] = base64_rev_tables[s[3]];
- if (src[i] == '=') {
- ac = (ac << 6);
- bits += 6;
- if (bits >= 8)
- bits -= 8;
- continue;
- }
- if (p == NULL || src[i] == 0)
- return -1;
- ac = (ac << 6) | (p - base64_table);
- bits += 6;
- if (bits >= 8) {
- bits -= 8;
- *bp++ = (u8)(ac >> bits);
+ val = input[0] << 18 | input[1] << 12 | input[2] << 6 | input[3];
+
+ if (unlikely(val < 0)) {
+ if (!padding || srclen != 4 || s[3] != '=')
+ return -1;
+ padding = 0;
+ srclen = s[2] == '=' ? 2 : 3;
+ break;
}
+
+ *bp++ = val >> 16;
+ *bp++ = val >> 8;
+ *bp++ = val;
+
+ s += 4;
+ srclen -= 4;
}
- if (ac & ((1 << bits) - 1))
+
+ if (likely(!srclen))
+ return bp - dst;
+ if (padding || srclen == 1)
return -1;
+
+ val = (base64_rev_tables[s[0]] << 12) | (base64_rev_tables[s[1]] << 6);
+ *bp++ = val >> 10;
+
+ if (srclen == 2) {
+ if (val & 0x800003ff)
+ return -1;
+ } else {
+ val |= base64_rev_tables[s[2]];
+ if (val & 0x80000003)
+ return -1;
+ *bp++ = val >> 2;
+ }
return bp - dst;
}
EXPORT_SYMBOL_GPL(base64_decode);
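For callers, the reworked lib/base64.c API above now takes an explicit padding flag and variant. A minimal round-trip sketch against the new prototypes (demo_base64_roundtrip() is an illustrative helper, not part of the patch; "Zm9vYmFy" matches the "foobar" vector used by the KUnit test further down):

    #include <linux/base64.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int demo_base64_roundtrip(void)
    {
        const u8 raw[] = { 'f', 'o', 'o', 'b', 'a', 'r' };
        char enc[16];
        u8 dec[16];
        int enc_len, dec_len;

        /* Unpadded, URL-safe alphabet ('-' and '_' for values 62/63). */
        enc_len = base64_encode(raw, sizeof(raw), enc, false, BASE64_URLSAFE);
        /* enc now holds "Zm9vYmFy"; it is not NUL-terminated. */

        dec_len = base64_decode(enc, enc_len, dec, false, BASE64_URLSAFE);
        if (dec_len < 0)
            return -EINVAL;    /* not valid input for this variant */

        return dec_len == (int)sizeof(raw) && !memcmp(dec, raw, dec_len) ? 0 : -EIO;
    }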
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 5a007952f7f2..7d7892e57a01 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -95,6 +95,7 @@ static const struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_INCL_SOURCENAME, 's' },
{ _DPRINTK_FLAGS_INCL_LINENO, 'l' },
{ _DPRINTK_FLAGS_INCL_TID, 't' },
+ { _DPRINTK_FLAGS_INCL_STACK, 'd' },
{ _DPRINTK_FLAGS_NONE, '_' },
};
diff --git a/lib/math/div64.c b/lib/math/div64.c
index bf77b9843175..d1e92ea24fce 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -177,94 +177,157 @@ EXPORT_SYMBOL(div64_s64);
* Iterative div/mod for use when dividend is not expected to be much
* bigger than divisor.
*/
+#ifndef iter_div_u64_rem
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
+#endif
-#ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
-{
- if (ilog2(a) + ilog2(b) <= 62)
- return div64_u64(a * b, c);
+#if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
-#if defined(__SIZEOF_INT128__)
+#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)
+#if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
/* native 64x64=128 bits multiplication */
- u128 prod = (u128)a * b;
- u64 n_lo = prod, n_hi = prod >> 64;
+ u128 prod = (u128)a * b + c;
+ *p_lo = prod;
+ return prod >> 64;
+}
#else
-
- /* perform a 64x64=128 bits multiplication manually */
- u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
+ /* perform a 64x64=128 bits multiplication in 32bit chunks */
u64 x, y, z;
- x = (u64)a_lo * b_lo;
- y = (u64)a_lo * b_hi + (u32)(x >> 32);
- z = (u64)a_hi * b_hi + (u32)(y >> 32);
- y = (u64)a_hi * b_lo + (u32)y;
- z += (u32)(y >> 32);
- x = (y << 32) + (u32)x;
-
- u64 n_lo = x, n_hi = z;
+	/* Since (x-1)*(x-1) + 2*(x-1) == x*x - 1, two u32 can be added to a u64 */
+ x = mul_add(a, b, c);
+ y = mul_add(a, b >> 32, c >> 32);
+ y = add_u64_u32(y, x >> 32);
+ z = mul_add(a >> 32, b >> 32, y >> 32);
+ y = mul_add(a >> 32, b, y);
+ *p_lo = (y << 32) + (u32)x;
+ return add_u64_u32(z, y >> 32);
+}
+#endif
+#ifndef BITS_PER_ITER
+#define BITS_PER_ITER (__LONG_WIDTH__ >= 64 ? 32 : 16)
#endif
- /* make sure c is not zero, trigger runtime exception otherwise */
- if (unlikely(c == 0)) {
- unsigned long zero = 0;
+#if BITS_PER_ITER == 32
+#define mul_u64_long_add_u64(p_lo, a, b, c) mul_u64_u64_add_u64(p_lo, a, b, c)
+#define add_u64_long(a, b) ((a) + (b))
+#else
+#undef BITS_PER_ITER
+#define BITS_PER_ITER 16
+static inline u32 mul_u64_long_add_u64(u64 *p_lo, u64 a, u32 b, u64 c)
+{
+ u64 n_lo = mul_add(a, b, c);
+ u64 n_med = mul_add(a >> 32, b, c >> 32);
- OPTIMIZER_HIDE_VAR(zero);
- return ~0UL/zero;
- }
+ n_med = add_u64_u32(n_med, n_lo >> 32);
+ *p_lo = n_med << 32 | (u32)n_lo;
+ return n_med >> 32;
+}
+
+#define add_u64_long(a, b) add_u64_u32(a, b)
+#endif
+
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
+{
+ unsigned long d_msig, q_digit;
+ unsigned int reps, d_z_hi;
+ u64 quotient, n_lo, n_hi;
+ u32 overflow;
- int shift = __builtin_ctzll(c);
+ n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
- /* try reducing the fraction in case the dividend becomes <= 64 bits */
- if ((n_hi >> shift) == 0) {
- u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
+ if (!n_hi)
+ return div64_u64(n_lo, d);
- return div64_u64(n, c >> shift);
- /*
- * The remainder value if needed would be:
- * res = div64_u64_rem(n, c >> shift, &rem);
- * rem = (rem << shift) + (n_lo - (n << shift));
- */
- }
+ if (unlikely(n_hi >= d)) {
+ /* trigger runtime exception if divisor is zero */
+ if (d == 0) {
+ unsigned long zero = 0;
- if (n_hi >= c) {
+ OPTIMIZER_HIDE_VAR(zero);
+ return ~0UL/zero;
+ }
/* overflow: result is unrepresentable in a u64 */
- return -1;
+ return ~0ULL;
}
- /* Do the full 128 by 64 bits division */
-
- shift = __builtin_clzll(c);
- c <<= shift;
+ /* Left align the divisor, shifting the dividend to match */
+ d_z_hi = __builtin_clzll(d);
+ if (d_z_hi) {
+ d <<= d_z_hi;
+ n_hi = n_hi << d_z_hi | n_lo >> (64 - d_z_hi);
+ n_lo <<= d_z_hi;
+ }
- int p = 64 + shift;
- u64 res = 0;
- bool carry;
+ reps = 64 / BITS_PER_ITER;
+ /* Optimise loop count for small dividends */
+ if (!(u32)(n_hi >> 32)) {
+ reps -= 32 / BITS_PER_ITER;
+ n_hi = n_hi << 32 | n_lo >> 32;
+ n_lo <<= 32;
+ }
+#if BITS_PER_ITER == 16
+ if (!(u32)(n_hi >> 48)) {
+ reps--;
+ n_hi = add_u64_u32(n_hi << 16, n_lo >> 48);
+ n_lo <<= 16;
+ }
+#endif
- do {
- carry = n_hi >> 63;
- shift = carry ? 1 : __builtin_clzll(n_hi);
- if (p < shift)
- break;
- p -= shift;
- n_hi <<= shift;
- n_hi |= n_lo >> (64 - shift);
- n_lo <<= shift;
- if (carry || (n_hi >= c)) {
- n_hi -= c;
- res |= 1ULL << p;
+ /* Invert the dividend so we can use add instead of subtract. */
+ n_lo = ~n_lo;
+ n_hi = ~n_hi;
+
+ /*
+ * Get the most significant BITS_PER_ITER bits of the divisor.
+ * This is used to get a low 'guestimate' of the quotient digit.
+ */
+ d_msig = (d >> (64 - BITS_PER_ITER)) + 1;
+
+ /*
+ * Now do a 'long division' with BITS_PER_ITER bit 'digits'.
+	 * The 'guess' quotient digit can be low and up to BITS_PER_ITER+1 bits wide.
+ * The worst case is dividing ~0 by 0x8000 which requires two subtracts.
+ */
+ quotient = 0;
+ while (reps--) {
+ q_digit = (unsigned long)(~n_hi >> (64 - 2 * BITS_PER_ITER)) / d_msig;
+ /* Shift 'n' left to align with the product q_digit * d */
+ overflow = n_hi >> (64 - BITS_PER_ITER);
+ n_hi = add_u64_u32(n_hi << BITS_PER_ITER, n_lo >> (64 - BITS_PER_ITER));
+ n_lo <<= BITS_PER_ITER;
+ /* Add product to negated divisor */
+ overflow += mul_u64_long_add_u64(&n_hi, d, q_digit, n_hi);
+ /* Adjust for the q_digit 'guestimate' being low */
+ while (overflow < 0xffffffff >> (32 - BITS_PER_ITER)) {
+ q_digit++;
+ n_hi += d;
+ overflow += n_hi < d;
}
- } while (n_hi);
- /* The remainder value if needed would be n_hi << p */
+ quotient = add_u64_long(quotient << BITS_PER_ITER, q_digit);
+ }
- return res;
+ /*
+	 * The above only ensures the remainder doesn't overflow;
+	 * it may still be possible to add (i.e. subtract) another copy
+ * of the divisor.
+ */
+ if ((n_hi + d) > n_hi)
+ quotient++;
+ return quotient;
}
-EXPORT_SYMBOL(mul_u64_u64_div_u64);
+#if !defined(test_mul_u64_add_u64_div_u64)
+EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
+#endif
#endif
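The rewritten helper above computes (a * b + c) / d with a full 128-bit intermediate, so truncating and rounding-up division differ only in the additive term, exactly as the test module below exercises with c == 0 and c == d - 1. A small illustrative sketch (the scale_*() wrapper names are hypothetical; the prototype is assumed to be exposed via linux/math64.h, as the #ifndef guard suggests):

    #include <linux/math64.h>
    #include <linux/types.h>

    /* floor((ns * num) / den) without 64x64 intermediate overflow */
    static u64 scale_ns(u64 ns, u64 num, u64 den)
    {
        return mul_u64_add_u64_div_u64(ns, num, 0, den);
    }

    /* ceil((ns * num) / den) == floor((ns * num + den - 1) / den) */
    static u64 scale_ns_up(u64 ns, u64 num, u64 den)
    {
        return mul_u64_add_u64_div_u64(ns, num, den - 1, den);
    }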
diff --git a/lib/math/test_mul_u64_u64_div_u64.c b/lib/math/test_mul_u64_u64_div_u64.c
index 58d058de4e73..338d014f0c73 100644
--- a/lib/math/test_mul_u64_u64_div_u64.c
+++ b/lib/math/test_mul_u64_u64_div_u64.c
@@ -10,80 +10,141 @@
#include <linux/printk.h>
#include <linux/math64.h>
-typedef struct { u64 a; u64 b; u64 c; u64 result; } test_params;
+typedef struct { u64 a; u64 b; u64 d; u64 result; uint round_up;} test_params;
static test_params test_values[] = {
/* this contains many edge values followed by a couple random values */
-{ 0xb, 0x7, 0x3, 0x19 },
-{ 0xffff0000, 0xffff0000, 0xf, 0x1110eeef00000000 },
-{ 0xffffffff, 0xffffffff, 0x1, 0xfffffffe00000001 },
-{ 0xffffffff, 0xffffffff, 0x2, 0x7fffffff00000000 },
-{ 0x1ffffffff, 0xffffffff, 0x2, 0xfffffffe80000000 },
-{ 0x1ffffffff, 0xffffffff, 0x3, 0xaaaaaaa9aaaaaaab },
-{ 0x1ffffffff, 0x1ffffffff, 0x4, 0xffffffff00000000 },
-{ 0xffff000000000000, 0xffff000000000000, 0xffff000000000001, 0xfffeffffffffffff },
-{ 0x3333333333333333, 0x3333333333333333, 0x5555555555555555, 0x1eb851eb851eb851 },
-{ 0x7fffffffffffffff, 0x2, 0x3, 0x5555555555555554 },
-{ 0xffffffffffffffff, 0x2, 0x8000000000000000, 0x3 },
-{ 0xffffffffffffffff, 0x2, 0xc000000000000000, 0x2 },
-{ 0xffffffffffffffff, 0x4000000000000004, 0x8000000000000000, 0x8000000000000007 },
-{ 0xffffffffffffffff, 0x4000000000000001, 0x8000000000000000, 0x8000000000000001 },
-{ 0xffffffffffffffff, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000001 },
-{ 0xfffffffffffffffe, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000000 },
-{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffe, 0x8000000000000001 },
-{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffd, 0x8000000000000002 },
-{ 0x7fffffffffffffff, 0xffffffffffffffff, 0xc000000000000000, 0xaaaaaaaaaaaaaaa8 },
-{ 0xffffffffffffffff, 0x7fffffffffffffff, 0xa000000000000000, 0xccccccccccccccca },
-{ 0xffffffffffffffff, 0x7fffffffffffffff, 0x9000000000000000, 0xe38e38e38e38e38b },
-{ 0x7fffffffffffffff, 0x7fffffffffffffff, 0x5000000000000000, 0xccccccccccccccc9 },
-{ 0xffffffffffffffff, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfffffffffffffffe },
-{ 0xe6102d256d7ea3ae, 0x70a77d0be4c31201, 0xd63ec35ab3220357, 0x78f8bf8cc86c6e18 },
-{ 0xf53bae05cb86c6e1, 0x3847b32d2f8d32e0, 0xcfd4f55a647f403c, 0x42687f79d8998d35 },
-{ 0x9951c5498f941092, 0x1f8c8bfdf287a251, 0xa3c8dc5f81ea3fe2, 0x1d887cb25900091f },
-{ 0x374fee9daa1bb2bb, 0x0d0bfbff7b8ae3ef, 0xc169337bd42d5179, 0x03bb2dbaffcbb961 },
-{ 0xeac0d03ac10eeaf0, 0x89be05dfa162ed9b, 0x92bb1679a41f0e4b, 0xdc5f5cc9e270d216 },
+{ 0xb, 0x7, 0x3, 0x19, 1 },
+{ 0xffff0000, 0xffff0000, 0xf, 0x1110eeef00000000, 0 },
+{ 0xffffffff, 0xffffffff, 0x1, 0xfffffffe00000001, 0 },
+{ 0xffffffff, 0xffffffff, 0x2, 0x7fffffff00000000, 1 },
+{ 0x1ffffffff, 0xffffffff, 0x2, 0xfffffffe80000000, 1 },
+{ 0x1ffffffff, 0xffffffff, 0x3, 0xaaaaaaa9aaaaaaab, 0 },
+{ 0x1ffffffff, 0x1ffffffff, 0x4, 0xffffffff00000000, 1 },
+{ 0xffff000000000000, 0xffff000000000000, 0xffff000000000001, 0xfffeffffffffffff, 1 },
+{ 0x3333333333333333, 0x3333333333333333, 0x5555555555555555, 0x1eb851eb851eb851, 1 },
+{ 0x7fffffffffffffff, 0x2, 0x3, 0x5555555555555554, 1 },
+{ 0xffffffffffffffff, 0x2, 0x8000000000000000, 0x3, 1 },
+{ 0xffffffffffffffff, 0x2, 0xc000000000000000, 0x2, 1 },
+{ 0xffffffffffffffff, 0x4000000000000004, 0x8000000000000000, 0x8000000000000007, 1 },
+{ 0xffffffffffffffff, 0x4000000000000001, 0x8000000000000000, 0x8000000000000001, 1 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000001, 0 },
+{ 0xfffffffffffffffe, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000000, 1 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffe, 0x8000000000000001, 1 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffd, 0x8000000000000002, 1 },
+{ 0x7fffffffffffffff, 0xffffffffffffffff, 0xc000000000000000, 0xaaaaaaaaaaaaaaa8, 1 },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0xa000000000000000, 0xccccccccccccccca, 1 },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0x9000000000000000, 0xe38e38e38e38e38b, 1 },
+{ 0x7fffffffffffffff, 0x7fffffffffffffff, 0x5000000000000000, 0xccccccccccccccc9, 1 },
+{ 0xffffffffffffffff, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfffffffffffffffe, 0 },
+{ 0xe6102d256d7ea3ae, 0x70a77d0be4c31201, 0xd63ec35ab3220357, 0x78f8bf8cc86c6e18, 1 },
+{ 0xf53bae05cb86c6e1, 0x3847b32d2f8d32e0, 0xcfd4f55a647f403c, 0x42687f79d8998d35, 1 },
+{ 0x9951c5498f941092, 0x1f8c8bfdf287a251, 0xa3c8dc5f81ea3fe2, 0x1d887cb25900091f, 1 },
+{ 0x374fee9daa1bb2bb, 0x0d0bfbff7b8ae3ef, 0xc169337bd42d5179, 0x03bb2dbaffcbb961, 1 },
+{ 0xeac0d03ac10eeaf0, 0x89be05dfa162ed9b, 0x92bb1679a41f0e4b, 0xdc5f5cc9e270d216, 1 },
};
/*
* The above table can be verified with the following shell script:
- *
- * #!/bin/sh
- * sed -ne 's/^{ \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\) },$/\1 \2 \3 \4/p' \
- * lib/math/test_mul_u64_u64_div_u64.c |
- * while read a b c r; do
- * expected=$( printf "obase=16; ibase=16; %X * %X / %X\n" $a $b $c | bc )
- * given=$( printf "%X\n" $r )
- * if [ "$expected" = "$given" ]; then
- * echo "$a * $b / $c = $r OK"
- * else
- * echo "$a * $b / $c = $r is wrong" >&2
- * echo "should be equivalent to 0x$expected" >&2
- * exit 1
- * fi
- * done
+
+#!/bin/sh
+sed -ne 's/^{ \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\) },$/\1 \2 \3 \4 \5/p' \
+ lib/math/test_mul_u64_u64_div_u64.c |
+while read a b d r e; do
+ expected=$( printf "obase=16; ibase=16; %X * %X / %X\n" $a $b $d | bc )
+ given=$( printf "%X\n" $r )
+ if [ "$expected" = "$given" ]; then
+ echo "$a * $b / $d = $r OK"
+ else
+ echo "$a * $b / $d = $r is wrong" >&2
+ echo "should be equivalent to 0x$expected" >&2
+ exit 1
+ fi
+ expected=$( printf "obase=16; ibase=16; (%X * %X + %X) / %X\n" $a $b $((d-1)) $d | bc )
+ given=$( printf "%X\n" $((r + e)) )
+ if [ "$expected" = "$given" ]; then
+ echo "$a * $b +/ $d = $(printf '%#x' $((r + e))) OK"
+ else
+ echo "$a * $b +/ $d = $(printf '%#x' $((r + e))) is wrong" >&2
+ echo "should be equivalent to 0x$expected" >&2
+ exit 1
+ fi
+done
+
*/
-static int __init test_init(void)
+static u64 test_mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+#if __LONG_WIDTH__ >= 64
+#define TEST_32BIT_DIV
+static u64 test_mul_u64_add_u64_div_u64_32bit(u64 a, u64 b, u64 c, u64 d);
+#endif
+
+static int __init test_run(unsigned int fn_no, const char *fn_name)
{
+ u64 start_time;
+ int errors = 0;
+ int tests = 0;
int i;
- pr_info("Starting mul_u64_u64_div_u64() test\n");
+ start_time = ktime_get_ns();
for (i = 0; i < ARRAY_SIZE(test_values); i++) {
u64 a = test_values[i].a;
u64 b = test_values[i].b;
- u64 c = test_values[i].c;
+ u64 d = test_values[i].d;
u64 expected_result = test_values[i].result;
- u64 result = mul_u64_u64_div_u64(a, b, c);
+ u64 result, result_up;
+
+ switch (fn_no) {
+ default:
+ result = mul_u64_u64_div_u64(a, b, d);
+ result_up = mul_u64_u64_div_u64_roundup(a, b, d);
+ break;
+ case 1:
+ result = test_mul_u64_add_u64_div_u64(a, b, 0, d);
+ result_up = test_mul_u64_add_u64_div_u64(a, b, d - 1, d);
+ break;
+#ifdef TEST_32BIT_DIV
+ case 2:
+ result = test_mul_u64_add_u64_div_u64_32bit(a, b, 0, d);
+ result_up = test_mul_u64_add_u64_div_u64_32bit(a, b, d - 1, d);
+ break;
+#endif
+ }
+
+ tests += 2;
if (result != expected_result) {
- pr_err("ERROR: 0x%016llx * 0x%016llx / 0x%016llx\n", a, b, c);
+ pr_err("ERROR: 0x%016llx * 0x%016llx / 0x%016llx\n", a, b, d);
pr_err("ERROR: expected result: %016llx\n", expected_result);
pr_err("ERROR: obtained result: %016llx\n", result);
+ errors++;
+ }
+ expected_result += test_values[i].round_up;
+ if (result_up != expected_result) {
+ pr_err("ERROR: 0x%016llx * 0x%016llx +/ 0x%016llx\n", a, b, d);
+ pr_err("ERROR: expected result: %016llx\n", expected_result);
+ pr_err("ERROR: obtained result: %016llx\n", result_up);
+ errors++;
}
}
- pr_info("Completed mul_u64_u64_div_u64() test\n");
+ pr_info("Completed %s() test, %d tests, %d errors, %llu ns\n",
+ fn_name, tests, errors, ktime_get_ns() - start_time);
+ return errors;
+}
+
+static int __init test_init(void)
+{
+ pr_info("Starting mul_u64_u64_div_u64() test\n");
+ if (test_run(0, "mul_u64_u64_div_u64"))
+ return -EINVAL;
+ if (test_run(1, "test_mul_u64_u64_div_u64"))
+ return -EINVAL;
+#ifdef TEST_32BIT_DIV
+ if (test_run(2, "test_mul_u64_u64_div_u64_32bit"))
+ return -EINVAL;
+#endif
return 0;
}
@@ -91,6 +152,36 @@ static void __exit test_exit(void)
{
}
+/* Compile the generic mul_u64_add_u64_div_u64() code */
+#undef __div64_32
+#define __div64_32 __div64_32
+#define div_s64_rem div_s64_rem
+#define div64_u64_rem div64_u64_rem
+#define div64_u64 div64_u64
+#define div64_s64 div64_s64
+#define iter_div_u64_rem iter_div_u64_rem
+
+#undef mul_u64_add_u64_div_u64
+#define mul_u64_add_u64_div_u64 test_mul_u64_add_u64_div_u64
+#define test_mul_u64_add_u64_div_u64 test_mul_u64_add_u64_div_u64
+
+#include "div64.c"
+
+#ifdef TEST_32BIT_DIV
+/* Recompile the generic code for 32bit long */
+#undef test_mul_u64_add_u64_div_u64
+#define test_mul_u64_add_u64_div_u64 test_mul_u64_add_u64_div_u64_32bit
+#undef BITS_PER_ITER
+#define BITS_PER_ITER 16
+
+#define mul_u64_u64_add_u64 mul_u64_u64_add_u64_32bit
+#undef mul_u64_long_add_u64
+#undef add_u64_long
+#undef mul_add
+
+#include "div64.c"
+#endif
+
module_init(test_init);
module_exit(test_exit);
diff --git a/lib/plist.c b/lib/plist.c
index 330febb4bd7d..ba677c31e8f3 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -47,8 +47,8 @@ static void plist_check_list(struct list_head *top)
plist_check_prev_next(top, prev, next);
while (next != top) {
- WRITE_ONCE(prev, next);
- WRITE_ONCE(next, prev->next);
+ prev = next;
+ next = prev->next;
plist_check_prev_next(top, prev, next);
}
}
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 859c251b23ce..e2d65d3b1c35 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -27,7 +27,7 @@
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
/* Paired with WRITE_ONCE() in .proc_handler().
- * Changing two values seperately could be inconsistent
+ * Changing two values separately could be inconsistent
* and some message could be lost. (See: net_ratelimit_state).
*/
int interval = READ_ONCE(rs->interval);
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 5114eda6309c..18d42bcf4ec9 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -460,35 +460,6 @@ void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
}
EXPORT_SYMBOL(__rb_insert_augmented);
-/*
- * This function returns the first node (in sort order) of the tree.
- */
-struct rb_node *rb_first(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_left)
- n = n->rb_left;
- return n;
-}
-EXPORT_SYMBOL(rb_first);
-
-struct rb_node *rb_last(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_right)
- n = n->rb_right;
- return n;
-}
-EXPORT_SYMBOL(rb_last);
-
struct rb_node *rb_next(const struct rb_node *node)
{
struct rb_node *parent;
diff --git a/lib/sys_info.c b/lib/sys_info.c
index 496f9151c9b6..f32a06ec9ed4 100644
--- a/lib/sys_info.c
+++ b/lib/sys_info.c
@@ -1,31 +1,35 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/sched/debug.h>
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/console.h>
+#include <linux/log2.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
-#include <linux/sysctl.h>
#include <linux/nmi.h>
+#include <linux/sched/debug.h>
+#include <linux/string.h>
+#include <linux/sysctl.h>
#include <linux/sys_info.h>
-struct sys_info_name {
- unsigned long bit;
- const char *name;
+static const char * const si_names[] = {
+ [ilog2(SYS_INFO_TASKS)] = "tasks",
+ [ilog2(SYS_INFO_MEM)] = "mem",
+ [ilog2(SYS_INFO_TIMERS)] = "timers",
+ [ilog2(SYS_INFO_LOCKS)] = "locks",
+ [ilog2(SYS_INFO_FTRACE)] = "ftrace",
+ [ilog2(SYS_INFO_PANIC_CONSOLE_REPLAY)] = "",
+ [ilog2(SYS_INFO_ALL_BT)] = "all_bt",
+ [ilog2(SYS_INFO_BLOCKED_TASKS)] = "blocked_tasks",
};
/*
- * When 'si_names' gets updated, please make sure the 'sys_info_avail'
- * below is updated accordingly.
+ * Default kernel sys_info mask.
+ * If a kernel module calls sys_info() with si_mask == 0, then
+ * this mask will be used.
*/
-static const struct sys_info_name si_names[] = {
- { SYS_INFO_TASKS, "tasks" },
- { SYS_INFO_MEM, "mem" },
- { SYS_INFO_TIMERS, "timers" },
- { SYS_INFO_LOCKS, "locks" },
- { SYS_INFO_FTRACE, "ftrace" },
- { SYS_INFO_ALL_CPU_BT, "all_bt" },
- { SYS_INFO_BLOCKED_TASKS, "blocked_tasks" },
-};
+static unsigned long kernel_si_mask;
/* Expecting string like "xxx_sys_info=tasks,mem,timers,locks,ftrace,..." */
unsigned long sys_info_parse_param(char *str)
@@ -36,12 +40,9 @@ unsigned long sys_info_parse_param(char *str)
s = str;
while ((name = strsep(&s, ",")) && *name) {
- for (i = 0; i < ARRAY_SIZE(si_names); i++) {
- if (!strcmp(name, si_names[i].name)) {
- si_bits |= si_names[i].bit;
- break;
- }
- }
+ i = match_string(si_names, ARRAY_SIZE(si_names), name);
+ if (i >= 0)
+ __set_bit(i, &si_bits);
}
return si_bits;
@@ -49,56 +50,93 @@ unsigned long sys_info_parse_param(char *str)
#ifdef CONFIG_SYSCTL
-static const char sys_info_avail[] __maybe_unused = "tasks,mem,timers,locks,ftrace,all_bt,blocked_tasks";
+static int sys_info_write_handler(const struct ctl_table *table,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ unsigned long *si_bits_global)
+{
+ unsigned long si_bits;
+ int ret;
+
+ ret = proc_dostring(table, 1, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+
+ si_bits = sys_info_parse_param(table->data);
+
+ /* The access to the global value is not synchronized. */
+ WRITE_ONCE(*si_bits_global, si_bits);
+
+ return 0;
+}
+
+static int sys_info_read_handler(const struct ctl_table *table,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ unsigned long *si_bits_global)
+{
+ unsigned long si_bits;
+ unsigned int len = 0;
+ char *delim = "";
+ unsigned int i;
+
+ /* The access to the global value is not synchronized. */
+ si_bits = READ_ONCE(*si_bits_global);
+
+ for_each_set_bit(i, &si_bits, ARRAY_SIZE(si_names)) {
+ if (*si_names[i]) {
+ len += scnprintf(table->data + len, table->maxlen - len,
+ "%s%s", delim, si_names[i]);
+ delim = ",";
+ }
+ }
+
+ return proc_dostring(table, 0, buffer, lenp, ppos);
+}
int sysctl_sys_info_handler(const struct ctl_table *ro_table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
- char names[sizeof(sys_info_avail)];
struct ctl_table table;
- unsigned long *si_bits_global;
-
- si_bits_global = ro_table->data;
-
- if (write) {
- unsigned long si_bits;
- int ret;
-
- table = *ro_table;
- table.data = names;
- table.maxlen = sizeof(names);
- ret = proc_dostring(&table, write, buffer, lenp, ppos);
- if (ret)
- return ret;
-
- si_bits = sys_info_parse_param(names);
- /* The access to the global value is not synchronized. */
- WRITE_ONCE(*si_bits_global, si_bits);
- return 0;
- } else {
- /* for 'read' operation */
- char *delim = "";
- int i, len = 0;
-
- names[0] = '\0';
- for (i = 0; i < ARRAY_SIZE(si_names); i++) {
- if (*si_bits_global & si_names[i].bit) {
- len += scnprintf(names + len, sizeof(names) - len,
- "%s%s", delim, si_names[i].name);
- delim = ",";
- }
- }
+ unsigned int i;
+ size_t maxlen;
- table = *ro_table;
- table.data = names;
- table.maxlen = sizeof(names);
- return proc_dostring(&table, write, buffer, lenp, ppos);
- }
+ maxlen = 0;
+ for (i = 0; i < ARRAY_SIZE(si_names); i++)
+ maxlen += strlen(si_names[i]) + 1;
+
+ char *names __free(kfree) = kzalloc(maxlen, GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ table = *ro_table;
+ table.data = names;
+ table.maxlen = maxlen;
+
+ if (write)
+ return sys_info_write_handler(&table, buffer, lenp, ppos, ro_table->data);
+ else
+ return sys_info_read_handler(&table, buffer, lenp, ppos, ro_table->data);
+}
+
+static const struct ctl_table sys_info_sysctls[] = {
+ {
+ .procname = "kernel_sys_info",
+ .data = &kernel_si_mask,
+ .maxlen = sizeof(kernel_si_mask),
+ .mode = 0644,
+ .proc_handler = sysctl_sys_info_handler,
+ },
+};
+
+static int __init sys_info_sysctl_init(void)
+{
+ register_sysctl_init("kernel", sys_info_sysctls);
+ return 0;
}
+subsys_initcall(sys_info_sysctl_init);
#endif
-void sys_info(unsigned long si_mask)
+static void __sys_info(unsigned long si_mask)
{
if (si_mask & SYS_INFO_TASKS)
show_state();
@@ -115,9 +153,14 @@ void sys_info(unsigned long si_mask)
if (si_mask & SYS_INFO_FTRACE)
ftrace_dump(DUMP_ALL);
- if (si_mask & SYS_INFO_ALL_CPU_BT)
+ if (si_mask & SYS_INFO_ALL_BT)
trigger_all_cpu_backtrace();
if (si_mask & SYS_INFO_BLOCKED_TASKS)
show_state_filter(TASK_UNINTERRUPTIBLE);
}
+
+void sys_info(unsigned long si_mask)
+{
+ __sys_info(si_mask ? : kernel_si_mask);
+}
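With the sysctl registered above, the mask is writable as a comma-separated list at /proc/sys/kernel/kernel_sys_info, and sys_info(0) falls back to it. A brief illustrative sketch of an in-kernel caller (demo_dump() and its fixed-size buffer are assumptions, not part of the patch):

    #include <linux/string.h>
    #include <linux/sys_info.h>

    static void demo_dump(const char *list)
    {
        char buf[64];
        unsigned long mask;

        /* sys_info_parse_param() tokenizes in place, so parse a copy. */
        strscpy(buf, list, sizeof(buf));
        mask = sys_info_parse_param(buf);

        /* Bits can also be ORed in directly. */
        mask |= SYS_INFO_LOCKS | SYS_INFO_BLOCKED_TASKS;

        /* A zero mask would instead use the kernel_sys_info default. */
        sys_info(mask);
    }

For example, demo_dump("tasks,mem") would dump task and memory state plus the explicitly ORed-in lock and blocked-task reports.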
diff --git a/lib/test_kho.c b/lib/test_kho.c
index fff018e5548d..47de56280795 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -33,44 +33,28 @@ struct kho_test_state {
unsigned int nr_folios;
struct folio **folios;
phys_addr_t *folios_info;
+ struct kho_vmalloc folios_info_phys;
+ int nr_folios_preserved;
struct folio *fdt;
__wsum csum;
};
static struct kho_test_state kho_test_state;
-static int kho_test_notifier(struct notifier_block *self, unsigned long cmd,
- void *v)
+static void kho_test_unpreserve_data(struct kho_test_state *state)
{
- struct kho_test_state *state = &kho_test_state;
- struct kho_serialization *ser = v;
- int err = 0;
-
- switch (cmd) {
- case KEXEC_KHO_ABORT:
- return NOTIFY_DONE;
- case KEXEC_KHO_FINALIZE:
- /* Handled below */
- break;
- default:
- return NOTIFY_BAD;
- }
-
- err |= kho_preserve_folio(state->fdt);
- err |= kho_add_subtree(ser, KHO_TEST_FDT, folio_address(state->fdt));
+ for (int i = 0; i < state->nr_folios_preserved; i++)
+ kho_unpreserve_folio(state->folios[i]);
- return err ? NOTIFY_BAD : NOTIFY_DONE;
+ kho_unpreserve_vmalloc(&state->folios_info_phys);
+ vfree(state->folios_info);
}
-static struct notifier_block kho_test_nb = {
- .notifier_call = kho_test_notifier,
-};
-
-static int kho_test_save_data(struct kho_test_state *state, void *fdt)
+static int kho_test_preserve_data(struct kho_test_state *state)
{
- phys_addr_t *folios_info __free(kvfree) = NULL;
struct kho_vmalloc folios_info_phys;
- int err = 0;
+ phys_addr_t *folios_info;
+ int err;
folios_info = vmalloc_array(state->nr_folios, sizeof(*folios_info));
if (!folios_info)
@@ -78,62 +62,98 @@ static int kho_test_save_data(struct kho_test_state *state, void *fdt)
err = kho_preserve_vmalloc(folios_info, &folios_info_phys);
if (err)
- return err;
+ goto err_free_info;
+
+ state->folios_info_phys = folios_info_phys;
+ state->folios_info = folios_info;
for (int i = 0; i < state->nr_folios; i++) {
struct folio *folio = state->folios[i];
unsigned int order = folio_order(folio);
folios_info[i] = virt_to_phys(folio_address(folio)) | order;
-
err = kho_preserve_folio(folio);
if (err)
- break;
+ goto err_unpreserve;
+ state->nr_folios_preserved++;
}
+ return 0;
+
+err_unpreserve:
+ /*
+	 * kho_test_unpreserve_data() frees folios_info; bail out immediately to
+	 * avoid a double free
+ */
+ kho_test_unpreserve_data(state);
+ return err;
+
+err_free_info:
+ vfree(folios_info);
+ return err;
+}
+
+static int kho_test_prepare_fdt(struct kho_test_state *state, ssize_t fdt_size)
+{
+ const char compatible[] = KHO_TEST_COMPAT;
+ unsigned int magic = KHO_TEST_MAGIC;
+ void *fdt = folio_address(state->fdt);
+ int err;
+
+ err = fdt_create(fdt, fdt_size);
+ err |= fdt_finish_reservemap(fdt);
+ err |= fdt_begin_node(fdt, "");
+ err |= fdt_property(fdt, "compatible", compatible, sizeof(compatible));
+ err |= fdt_property(fdt, "magic", &magic, sizeof(magic));
+
err |= fdt_begin_node(fdt, "data");
err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
sizeof(state->nr_folios));
- err |= fdt_property(fdt, "folios_info", &folios_info_phys,
- sizeof(folios_info_phys));
+ err |= fdt_property(fdt, "folios_info", &state->folios_info_phys,
+ sizeof(state->folios_info_phys));
err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
err |= fdt_end_node(fdt);
- if (!err)
- state->folios_info = no_free_ptr(folios_info);
+ err |= fdt_end_node(fdt);
+ err |= fdt_finish(fdt);
return err;
}
-static int kho_test_prepare_fdt(struct kho_test_state *state)
+static int kho_test_preserve(struct kho_test_state *state)
{
- const char compatible[] = KHO_TEST_COMPAT;
- unsigned int magic = KHO_TEST_MAGIC;
ssize_t fdt_size;
- int err = 0;
- void *fdt;
+ int err;
fdt_size = state->nr_folios * sizeof(phys_addr_t) + PAGE_SIZE;
state->fdt = folio_alloc(GFP_KERNEL, get_order(fdt_size));
if (!state->fdt)
return -ENOMEM;
- fdt = folio_address(state->fdt);
-
- err |= fdt_create(fdt, fdt_size);
- err |= fdt_finish_reservemap(fdt);
+ err = kho_preserve_folio(state->fdt);
+ if (err)
+ goto err_free_fdt;
- err |= fdt_begin_node(fdt, "");
- err |= fdt_property(fdt, "compatible", compatible, sizeof(compatible));
- err |= fdt_property(fdt, "magic", &magic, sizeof(magic));
- err |= kho_test_save_data(state, fdt);
- err |= fdt_end_node(fdt);
+ err = kho_test_preserve_data(state);
+ if (err)
+ goto err_unpreserve_fdt;
- err |= fdt_finish(fdt);
+ err = kho_test_prepare_fdt(state, fdt_size);
+ if (err)
+ goto err_unpreserve_data;
+ err = kho_add_subtree(KHO_TEST_FDT, folio_address(state->fdt));
if (err)
- folio_put(state->fdt);
+ goto err_unpreserve_data;
+
+ return 0;
+err_unpreserve_data:
+ kho_test_unpreserve_data(state);
+err_unpreserve_fdt:
+ kho_unpreserve_folio(state->fdt);
+err_free_fdt:
+ folio_put(state->fdt);
return err;
}
@@ -199,18 +219,12 @@ static int kho_test_save(void)
if (err)
goto err_free_folios;
- err = kho_test_prepare_fdt(state);
+ err = kho_test_preserve(state);
if (err)
goto err_free_folios;
- err = register_kho_notifier(&kho_test_nb);
- if (err)
- goto err_free_fdt;
-
return 0;
-err_free_fdt:
- folio_put(state->fdt);
err_free_folios:
kvfree(folios);
return err;
@@ -292,7 +306,6 @@ static int kho_test_restore(phys_addr_t fdt_phys)
if (err)
return err;
- pr_info("KHO restore succeeded\n");
return 0;
}
@@ -305,8 +318,15 @@ static int __init kho_test_init(void)
return 0;
err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys);
- if (!err)
- return kho_test_restore(fdt_phys);
+ if (!err) {
+ err = kho_test_restore(fdt_phys);
+ if (err)
+ pr_err("KHO restore failed\n");
+ else
+ pr_info("KHO restore succeeded\n");
+
+ return err;
+ }
if (err != -ENOENT) {
pr_warn("failed to retrieve %s FDT: %d\n", KHO_TEST_FDT, err);
@@ -329,7 +349,7 @@ static void kho_test_cleanup(void)
static void __exit kho_test_exit(void)
{
- unregister_kho_notifier(&kho_test_nb);
+ kho_remove_subtree(folio_address(kho_test_state.fdt));
kho_test_cleanup();
}
module_exit(kho_test_exit);
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index f7460831cfdd..601dba4b7d96 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -4,6 +4,7 @@
# KUnit tests
CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BASE64_KUNIT) += base64_kunit.o
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_BLACKHOLE_DEV_KUNIT_TEST) += blackhole_dev_kunit.o
diff --git a/lib/tests/base64_kunit.c b/lib/tests/base64_kunit.c
new file mode 100644
index 000000000000..f7252070c359
--- /dev/null
+++ b/lib/tests/base64_kunit.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64_kunit.c - KUnit tests for base64 encoding and decoding functions
+ *
+ * Copyright (c) 2025, Guan-Chun Wu <409411716@gms.tku.edu.tw>
+ */
+
+#include <kunit/test.h>
+#include <linux/base64.h>
+
+/* ---------- Benchmark helpers ---------- */
+static u64 bench_encode_ns(const u8 *data, int len, char *dst, int reps,
+ enum base64_variant variant)
+{
+ u64 t0, t1;
+
+ t0 = ktime_get_ns();
+ for (int i = 0; i < reps; i++)
+ base64_encode(data, len, dst, true, variant);
+ t1 = ktime_get_ns();
+
+ return div64_u64(t1 - t0, (u64)reps);
+}
+
+static u64 bench_decode_ns(const char *data, int len, u8 *dst, int reps,
+ enum base64_variant variant)
+{
+ u64 t0, t1;
+
+ t0 = ktime_get_ns();
+ for (int i = 0; i < reps; i++)
+ base64_decode(data, len, dst, true, variant);
+ t1 = ktime_get_ns();
+
+ return div64_u64(t1 - t0, (u64)reps);
+}
+
+static void run_perf_and_check(struct kunit *test, const char *label, int size,
+ enum base64_variant variant)
+{
+ const int reps = 1000;
+ size_t outlen = DIV_ROUND_UP(size, 3) * 4;
+ u8 *in = kmalloc(size, GFP_KERNEL);
+ char *enc = kmalloc(outlen, GFP_KERNEL);
+ u8 *decoded = kmalloc(size, GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, enc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, decoded);
+
+ get_random_bytes(in, size);
+ int enc_len = base64_encode(in, size, enc, true, variant);
+ int dec_len = base64_decode(enc, enc_len, decoded, true, variant);
+
+ /* correctness sanity check */
+ KUNIT_EXPECT_EQ(test, dec_len, size);
+ KUNIT_EXPECT_MEMEQ(test, decoded, in, size);
+
+	/* benchmark encode and decode */
+
+ u64 t1 = bench_encode_ns(in, size, enc, reps, variant);
+
+ kunit_info(test, "[%s] encode run : %lluns", label, t1);
+
+ u64 t2 = bench_decode_ns(enc, enc_len, decoded, reps, variant);
+
+ kunit_info(test, "[%s] decode run : %lluns", label, t2);
+
+ kfree(in);
+ kfree(enc);
+ kfree(decoded);
+}
+
+static void base64_performance_tests(struct kunit *test)
+{
+ /* run on STD variant only */
+ run_perf_and_check(test, "64B", 64, BASE64_STD);
+ run_perf_and_check(test, "1KB", 1024, BASE64_STD);
+}
+
+/* ---------- Helpers for encode ---------- */
+static void expect_encode_ok(struct kunit *test, const u8 *src, int srclen,
+ const char *expected, bool padding,
+ enum base64_variant variant)
+{
+ char buf[128];
+ int encoded_len = base64_encode(src, srclen, buf, padding, variant);
+
+ buf[encoded_len] = '\0';
+
+ KUNIT_EXPECT_EQ(test, encoded_len, strlen(expected));
+ KUNIT_EXPECT_STREQ(test, buf, expected);
+}
+
+/* ---------- Helpers for decode ---------- */
+static void expect_decode_ok(struct kunit *test, const char *src,
+ const u8 *expected, int expected_len, bool padding,
+ enum base64_variant variant)
+{
+ u8 buf[128];
+ int decoded_len = base64_decode(src, strlen(src), buf, padding, variant);
+
+ KUNIT_EXPECT_EQ(test, decoded_len, expected_len);
+ KUNIT_EXPECT_MEMEQ(test, buf, expected, expected_len);
+}
+
+static void expect_decode_err(struct kunit *test, const char *src,
+ int srclen, bool padding,
+ enum base64_variant variant)
+{
+ u8 buf[64];
+ int decoded_len = base64_decode(src, srclen, buf, padding, variant);
+
+ KUNIT_EXPECT_EQ(test, decoded_len, -1);
+}
+
+/* ---------- Encode Tests ---------- */
+static void base64_std_encode_tests(struct kunit *test)
+{
+ /* With padding */
+ expect_encode_ok(test, (const u8 *)"", 0, "", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"f", 1, "Zg==", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"fo", 2, "Zm8=", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foo", 3, "Zm9v", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foob", 4, "Zm9vYg==", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"fooba", 5, "Zm9vYmE=", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foobar", 6, "Zm9vYmFy", true, BASE64_STD);
+
+ /* Extra cases with padding */
+ expect_encode_ok(test, (const u8 *)"Hello, world!", 13, "SGVsbG8sIHdvcmxkIQ==",
+ true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"ABCDEFGHIJKLMNOPQRSTUVWXYZ", 26,
+ "QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVo=", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"abcdefghijklmnopqrstuvwxyz", 26,
+ "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo=", true, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"0123456789+/", 12, "MDEyMzQ1Njc4OSsv",
+ true, BASE64_STD);
+
+ /* Without padding */
+ expect_encode_ok(test, (const u8 *)"", 0, "", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"f", 1, "Zg", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"fo", 2, "Zm8", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foo", 3, "Zm9v", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foob", 4, "Zm9vYg", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"fooba", 5, "Zm9vYmE", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"foobar", 6, "Zm9vYmFy", false, BASE64_STD);
+
+ /* Extra cases without padding */
+ expect_encode_ok(test, (const u8 *)"Hello, world!", 13, "SGVsbG8sIHdvcmxkIQ",
+ false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"ABCDEFGHIJKLMNOPQRSTUVWXYZ", 26,
+ "QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVo", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"abcdefghijklmnopqrstuvwxyz", 26,
+ "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo", false, BASE64_STD);
+ expect_encode_ok(test, (const u8 *)"0123456789+/", 12, "MDEyMzQ1Njc4OSsv",
+ false, BASE64_STD);
+}
+
+/* ---------- Decode Tests ---------- */
+static void base64_std_decode_tests(struct kunit *test)
+{
+ /* -------- With padding --------*/
+ expect_decode_ok(test, "", (const u8 *)"", 0, true, BASE64_STD);
+ expect_decode_ok(test, "Zg==", (const u8 *)"f", 1, true, BASE64_STD);
+ expect_decode_ok(test, "Zm8=", (const u8 *)"fo", 2, true, BASE64_STD);
+ expect_decode_ok(test, "Zm9v", (const u8 *)"foo", 3, true, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYg==", (const u8 *)"foob", 4, true, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYmE=", (const u8 *)"fooba", 5, true, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYmFy", (const u8 *)"foobar", 6, true, BASE64_STD);
+ expect_decode_ok(test, "SGVsbG8sIHdvcmxkIQ==", (const u8 *)"Hello, world!", 13,
+ true, BASE64_STD);
+ expect_decode_ok(test, "QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVo=",
+ (const u8 *)"ABCDEFGHIJKLMNOPQRSTUVWXYZ", 26, true, BASE64_STD);
+ expect_decode_ok(test, "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo=",
+ (const u8 *)"abcdefghijklmnopqrstuvwxyz", 26, true, BASE64_STD);
+
+ /* Error cases */
+ expect_decode_err(test, "Zg=!", 4, true, BASE64_STD);
+ expect_decode_err(test, "Zm$=", 4, true, BASE64_STD);
+ expect_decode_err(test, "Z===", 4, true, BASE64_STD);
+ expect_decode_err(test, "Zg", 2, true, BASE64_STD);
+ expect_decode_err(test, "Zm9v====", 8, true, BASE64_STD);
+ expect_decode_err(test, "Zm==A", 5, true, BASE64_STD);
+
+ {
+ char with_nul[4] = { 'Z', 'g', '\0', '=' };
+
+ expect_decode_err(test, with_nul, 4, true, BASE64_STD);
+ }
+
+ /* -------- Without padding --------*/
+ expect_decode_ok(test, "", (const u8 *)"", 0, false, BASE64_STD);
+ expect_decode_ok(test, "Zg", (const u8 *)"f", 1, false, BASE64_STD);
+ expect_decode_ok(test, "Zm8", (const u8 *)"fo", 2, false, BASE64_STD);
+ expect_decode_ok(test, "Zm9v", (const u8 *)"foo", 3, false, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYg", (const u8 *)"foob", 4, false, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYmE", (const u8 *)"fooba", 5, false, BASE64_STD);
+ expect_decode_ok(test, "Zm9vYmFy", (const u8 *)"foobar", 6, false, BASE64_STD);
+ expect_decode_ok(test, "TWFu", (const u8 *)"Man", 3, false, BASE64_STD);
+ expect_decode_ok(test, "SGVsbG8sIHdvcmxkIQ", (const u8 *)"Hello, world!", 13,
+ false, BASE64_STD);
+ expect_decode_ok(test, "QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVo",
+ (const u8 *)"ABCDEFGHIJKLMNOPQRSTUVWXYZ", 26, false, BASE64_STD);
+ expect_decode_ok(test, "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo",
+ (const u8 *)"abcdefghijklmnopqrstuvwxyz", 26, false, BASE64_STD);
+ expect_decode_ok(test, "MDEyMzQ1Njc4OSsv", (const u8 *)"0123456789+/", 12,
+ false, BASE64_STD);
+
+ /* Error cases */
+ expect_decode_err(test, "Zg=!", 4, false, BASE64_STD);
+ expect_decode_err(test, "Zm$=", 4, false, BASE64_STD);
+ expect_decode_err(test, "Z===", 4, false, BASE64_STD);
+ expect_decode_err(test, "Zg=", 3, false, BASE64_STD);
+ expect_decode_err(test, "Zm9v====", 8, false, BASE64_STD);
+ expect_decode_err(test, "Zm==v", 4, false, BASE64_STD);
+
+ {
+ char with_nul[4] = { 'Z', 'g', '\0', '=' };
+
+ expect_decode_err(test, with_nul, 4, false, BASE64_STD);
+ }
+}
+
+/* ---------- Variant tests (URLSAFE / IMAP) ---------- */
+static void base64_variant_tests(struct kunit *test)
+{
+ const u8 sample1[] = { 0x00, 0xfb, 0xff, 0x7f, 0x80 };
+ char std_buf[128], url_buf[128], imap_buf[128];
+ u8 back[128];
+ int n_std, n_url, n_imap, m;
+ int i;
+
+ n_std = base64_encode(sample1, sizeof(sample1), std_buf, false, BASE64_STD);
+ n_url = base64_encode(sample1, sizeof(sample1), url_buf, false, BASE64_URLSAFE);
+ std_buf[n_std] = '\0';
+ url_buf[n_url] = '\0';
+
+ for (i = 0; i < n_std; i++) {
+ if (std_buf[i] == '+')
+ std_buf[i] = '-';
+ else if (std_buf[i] == '/')
+ std_buf[i] = '_';
+ }
+ KUNIT_EXPECT_STREQ(test, std_buf, url_buf);
+
+ m = base64_decode(url_buf, n_url, back, false, BASE64_URLSAFE);
+ KUNIT_EXPECT_EQ(test, m, (int)sizeof(sample1));
+ KUNIT_EXPECT_MEMEQ(test, back, sample1, sizeof(sample1));
+
+ n_std = base64_encode(sample1, sizeof(sample1), std_buf, false, BASE64_STD);
+ n_imap = base64_encode(sample1, sizeof(sample1), imap_buf, false, BASE64_IMAP);
+ std_buf[n_std] = '\0';
+ imap_buf[n_imap] = '\0';
+
+ for (i = 0; i < n_std; i++)
+ if (std_buf[i] == '/')
+ std_buf[i] = ',';
+ KUNIT_EXPECT_STREQ(test, std_buf, imap_buf);
+
+ m = base64_decode(imap_buf, n_imap, back, false, BASE64_IMAP);
+ KUNIT_EXPECT_EQ(test, m, (int)sizeof(sample1));
+ KUNIT_EXPECT_MEMEQ(test, back, sample1, sizeof(sample1));
+
+ {
+ const char *bad = "Zg==";
+ u8 tmp[8];
+
+ m = base64_decode(bad, strlen(bad), tmp, false, BASE64_URLSAFE);
+ KUNIT_EXPECT_EQ(test, m, -1);
+
+ m = base64_decode(bad, strlen(bad), tmp, false, BASE64_IMAP);
+ KUNIT_EXPECT_EQ(test, m, -1);
+ }
+}
+
+/* ---------- Test registration ---------- */
+static struct kunit_case base64_test_cases[] = {
+ KUNIT_CASE(base64_performance_tests),
+ KUNIT_CASE(base64_std_encode_tests),
+ KUNIT_CASE(base64_std_decode_tests),
+ KUNIT_CASE(base64_variant_tests),
+ {}
+};
+
+static struct kunit_suite base64_test_suite = {
+ .name = "base64",
+ .test_cases = base64_test_cases,
+};
+
+kunit_test_suite(base64_test_suite);
+
+MODULE_AUTHOR("Guan-Chun Wu <409411716@gms.tku.edu.tw>");
+MODULE_DESCRIPTION("KUnit tests for Base64 encoding/decoding, including performance checks");
+MODULE_LICENSE("GPL");
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 7b17b83c8042..b00a3a957de6 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,7 +12,7 @@
/* out-of-line parts */
-#if !defined(INLINE_COPY_FROM_USER) || defined(CONFIG_RUST)
+#if !defined(INLINE_COPY_FROM_USER)
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
return _inline_copy_from_user(to, from, n);
@@ -20,7 +20,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
EXPORT_SYMBOL(_copy_from_user);
#endif
-#if !defined(INLINE_COPY_TO_USER) || defined(CONFIG_RUST)
+#if !defined(INLINE_COPY_TO_USER)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
return _inline_copy_to_user(to, from, n);
diff --git a/lib/xxhash.c b/lib/xxhash.c
index cf629766f376..4125b3e3cf7f 100644
--- a/lib/xxhash.c
+++ b/lib/xxhash.c
@@ -73,21 +73,6 @@ static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;
-/*-**************************
- * Utils
- ***************************/
-void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
-{
- memcpy(dst, src, sizeof(*dst));
-}
-EXPORT_SYMBOL(xxh32_copy_state);
-
-void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
-{
- memcpy(dst, src, sizeof(*dst));
-}
-EXPORT_SYMBOL(xxh64_copy_state);
-
/*-***************************
* Simple Hash Functions
****************************/
@@ -239,20 +224,6 @@ EXPORT_SYMBOL(xxh64);
/*-**************************************************
* Advanced Hash Functions
***************************************************/
-void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
-{
- /* use a local state for memcpy() to avoid strict-aliasing warnings */
- struct xxh32_state state;
-
- memset(&state, 0, sizeof(state));
- state.v1 = seed + PRIME32_1 + PRIME32_2;
- state.v2 = seed + PRIME32_2;
- state.v3 = seed + 0;
- state.v4 = seed - PRIME32_1;
- memcpy(statePtr, &state, sizeof(state));
-}
-EXPORT_SYMBOL(xxh32_reset);
-
void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
{
/* use a local state for memcpy() to avoid strict-aliasing warnings */
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index 8237db17eee3..610d58d947ab 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -20,7 +20,6 @@ struct xz_dec_bcj {
enum {
BCJ_X86 = 4, /* x86 or x86-64 */
BCJ_POWERPC = 5, /* Big endian only */
- BCJ_IA64 = 6, /* Big or little endian */
BCJ_ARM = 7, /* Little endian only */
BCJ_ARMTHUMB = 8, /* Little endian only */
BCJ_SPARC = 9, /* Big or little endian */
@@ -180,92 +179,6 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
}
#endif
-#ifdef XZ_DEC_IA64
-static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
-{
- static const uint8_t branch_table[32] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 4, 4, 6, 6, 0, 0, 7, 7,
- 4, 4, 0, 0, 4, 4, 0, 0
- };
-
- /*
- * The local variables take a little bit stack space, but it's less
- * than what LZMA2 decoder takes, so it doesn't make sense to reduce
- * stack usage here without doing that for the LZMA2 decoder too.
- */
-
- /* Loop counters */
- size_t i;
- size_t j;
-
- /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
- uint32_t slot;
-
- /* Bitwise offset of the instruction indicated by slot */
- uint32_t bit_pos;
-
- /* bit_pos split into byte and bit parts */
- uint32_t byte_pos;
- uint32_t bit_res;
-
- /* Address part of an instruction */
- uint32_t addr;
-
- /* Mask used to detect which instructions to convert */
- uint32_t mask;
-
- /* 41-bit instruction stored somewhere in the lowest 48 bits */
- uint64_t instr;
-
- /* Instruction normalized with bit_res for easier manipulation */
- uint64_t norm;
-
- size &= ~(size_t)15;
-
- for (i = 0; i < size; i += 16) {
- mask = branch_table[buf[i] & 0x1F];
- for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
- if (((mask >> slot) & 1) == 0)
- continue;
-
- byte_pos = bit_pos >> 3;
- bit_res = bit_pos & 7;
- instr = 0;
- for (j = 0; j < 6; ++j)
- instr |= (uint64_t)(buf[i + j + byte_pos])
- << (8 * j);
-
- norm = instr >> bit_res;
-
- if (((norm >> 37) & 0x0F) == 0x05
- && ((norm >> 9) & 0x07) == 0) {
- addr = (norm >> 13) & 0x0FFFFF;
- addr |= ((uint32_t)(norm >> 36) & 1) << 20;
- addr <<= 4;
- addr -= s->pos + (uint32_t)i;
- addr >>= 4;
-
- norm &= ~((uint64_t)0x8FFFFF << 13);
- norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
- norm |= (uint64_t)(addr & 0x100000)
- << (36 - 20);
-
- instr &= (1 << bit_res) - 1;
- instr |= norm << bit_res;
-
- for (j = 0; j < 6; j++)
- buf[i + j + byte_pos]
- = (uint8_t)(instr >> (8 * j));
- }
- }
- }
-
- return i;
-}
-#endif
-
#ifdef XZ_DEC_ARM
static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
@@ -509,11 +422,6 @@ static void bcj_apply(struct xz_dec_bcj *s,
filtered = bcj_powerpc(s, buf, size);
break;
#endif
-#ifdef XZ_DEC_IA64
- case BCJ_IA64:
- filtered = bcj_ia64(s, buf, size);
- break;
-#endif
#ifdef XZ_DEC_ARM
case BCJ_ARM:
filtered = bcj_arm(s, buf, size);
@@ -699,9 +607,6 @@ enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
#ifdef XZ_DEC_POWERPC
case BCJ_POWERPC:
#endif
-#ifdef XZ_DEC_IA64
- case BCJ_IA64:
-#endif
#ifdef XZ_DEC_ARM
case BCJ_ARM:
#endif
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 8409784b1639..6775078f3cce 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -24,9 +24,6 @@
# ifdef CONFIG_XZ_DEC_POWERPC
# define XZ_DEC_POWERPC
# endif
-# ifdef CONFIG_XZ_DEC_IA64
-# define XZ_DEC_IA64
-# endif
# ifdef CONFIG_XZ_DEC_ARM
# define XZ_DEC_ARM
# endif
@@ -103,7 +100,6 @@
*/
#ifndef XZ_DEC_BCJ
# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
- || defined(XZ_DEC_IA64) \
|| defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
|| defined(XZ_DEC_SPARC) || defined(XZ_DEC_ARM64) \
|| defined(XZ_DEC_RISCV)