Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        |  22
-rw-r--r--  lib/Kconfig.kfence       |   1
-rw-r--r--  lib/bch.c                |   2
-rw-r--r--  lib/bitmap.c             |  42
-rw-r--r--  lib/bug.c                |  54
-rw-r--r--  lib/cmdline.c            |   1
-rw-r--r--  lib/crc8.c               |   2
-rw-r--r--  lib/decompress_unlzma.c  |   2
-rw-r--r--  lib/dynamic_debug.c      |   2
-rw-r--r--  lib/find_bit.c           |  68
-rw-r--r--  lib/genalloc.c           |   7
-rw-r--r--  lib/iov_iter.c           |   8
-rw-r--r--  lib/list_sort.c          |   2
-rw-r--r--  lib/parser.c             |  61
-rw-r--r--  lib/percpu_counter.c     |   2
-rw-r--r--  lib/stackdepot.c         |   6
-rw-r--r--  lib/test_kasan.c         |  59
-rw-r--r--  lib/test_vmalloc.c       | 128
-rw-r--r--  lib/vdso/gettimeofday.c  |  31
19 files changed, 238 insertions, 262 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5443da4e5e8a..678c13967580 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,8 +284,7 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
- depends on GCC_VERSION >= 50000 || CC_IS_CLANG
- depends on CC_IS_GCC || $(success,$(srctree)/scripts/test_dwarf5_support.sh $(CC) $(CLANG_FLAGS))
+ depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
depends on !DEBUG_INFO_BTF
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -449,6 +448,16 @@ config VMLINUX_VALIDATION
depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT
default y
+config VMLINUX_MAP
+ bool "Generate vmlinux.map file when linking"
+ depends on EXPERT
+ help
+ Selecting this option will pass "-Map=vmlinux.map" to ld
+ when linking vmlinux. That file can be useful for verifying
+ and debugging magic section games, and for seeing which
+ pieces of code get eliminated with
+ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -2564,11 +2573,18 @@ config TEST_FPU
endif # RUNTIME_TESTING_MENU
+config ARCH_USE_MEMTEST
+ bool
+ help
+ An architecture should select this when it uses early_memtest()
+ during boot process.
+
config MEMTEST
bool "Memtest"
+ depends on ARCH_USE_MEMTEST
help
This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
+ to be set and executed.
memtest=0, mean disabled; -- default
memtest=1, mean do 1 test pattern;
...
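For context, the new ARCH_USE_MEMTEST symbol gates CONFIG_MEMTEST on architectures that actually call early_memtest() while setting up memory. A minimal sketch of the kind of call an architecture makes (hypothetical arch hook and placement; early_memtest() and the memblock helpers are existing kernel APIs):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/memtest.h>

/* Hypothetical arch hook: run the optional early memory test over all RAM. */
static void __init arch_run_memtest(void)
{
        /* early_memtest() does nothing unless the user booted with memtest=N. */
        early_memtest(memblock_start_of_DRAM(), memblock_end_of_DRAM());
}

An architecture that wires this up would also "select ARCH_USE_MEMTEST" in its Kconfig so the MEMTEST prompt becomes visible.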
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 78f50ccb3b45..e641add33947 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -7,6 +7,7 @@ menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
select STACKTRACE
+ select IRQ_WORK
help
KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
access, use-after-free, and invalid-free errors. KFENCE is designed
diff --git a/lib/bch.c b/lib/bch.c
index 7c031ee8b93b..c8095f30f254 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -584,7 +584,7 @@ static int find_affine4_roots(struct bch_control *bch, unsigned int a,
k = a_log(bch, a);
rows[0] = c;
- /* buid linear system to solve X^4+aX^2+bX+c = 0 */
+ /* build linear system to solve X^4+aX^2+bX+c = 0 */
for (i = 0; i < m; i++) {
rows[i+1] = bch->a_pow_tab[4*i]^
(a ? bch->a_pow_tab[mod_s(bch, k)] : 0)^
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9f4626a4c95f..74ceb02f45e3 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -3,17 +3,19 @@
* lib/bitmap.c
* Helper functions for bitmap.h.
*/
-#include <linux/export.h>
-#include <linux/thread_info.h>
-#include <linux/ctype.h>
-#include <linux/errno.h>
+
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <asm/page.h>
@@ -1271,6 +1273,38 @@ void bitmap_free(const unsigned long *bitmap)
}
EXPORT_SYMBOL(bitmap_free);
+static void devm_bitmap_free(void *data)
+{
+ unsigned long *bitmap = data;
+
+ bitmap_free(bitmap);
+}
+
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
+{
+ unsigned long *bitmap;
+ int ret;
+
+ bitmap = bitmap_alloc(nbits, flags);
+ if (!bitmap)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
+ if (ret)
+ return NULL;
+
+ return bitmap;
+}
+EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
+
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
+{
+ return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
+
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
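The devm_bitmap_alloc()/devm_bitmap_zalloc() helpers added above tie the bitmap's lifetime to a struct device, so a driver no longer needs an explicit bitmap_free() on its error and teardown paths. A minimal usage sketch (hypothetical driver structure and probe helper):

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct foo_chip {                       /* hypothetical driver state */
        unsigned long *valid_mask;
};

static int foo_probe(struct device *dev, struct foo_chip *chip,
                     unsigned int nlines)
{
        /* Zeroed bitmap, freed automatically when @dev is released. */
        chip->valid_mask = devm_bitmap_zalloc(dev, nlines, GFP_KERNEL);
        if (!chip->valid_mask)
                return -ENOMEM;

        return 0;
}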
diff --git a/lib/bug.c b/lib/bug.c
index 8f9d537bfb2a..45a0584f6541 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -127,6 +127,22 @@ static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
}
#endif
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = bug->file;
+#else
+ *file = (const char *)bug + bug->file_disp;
+#endif
+ *line = bug->line;
+#else
+ *file = NULL;
+ *line = 0;
+#endif
+}
+
struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
@@ -153,32 +169,20 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
disable_trace_on_warning();
- file = NULL;
- line = 0;
- warning = 0;
+ bug_get_file_line(bug, &file, &line);
- if (bug) {
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- file = bug->file;
-#else
- file = (const char *)bug + bug->file_disp;
-#endif
- line = bug->line;
-#endif
- warning = (bug->flags & BUGFLAG_WARNING) != 0;
- once = (bug->flags & BUGFLAG_ONCE) != 0;
- done = (bug->flags & BUGFLAG_DONE) != 0;
-
- if (warning && once) {
- if (done)
- return BUG_TRAP_TYPE_WARN;
-
- /*
- * Since this is the only store, concurrency is not an issue.
- */
- bug->flags |= BUGFLAG_DONE;
- }
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
}
/*
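bug_get_file_line() factors the CONFIG_DEBUG_BUGVERBOSE / GENERIC_BUG_RELATIVE_POINTERS handling out of report_bug() so other report paths can reuse it. A hedged sketch of a caller built on the find_bug() shown above (hypothetical helper, not part of this patch):

#include <linux/bug.h>
#include <linux/printk.h>

/* Hypothetical helper: print where the BUG()/WARN() at @addr was declared. */
static void report_bug_location(unsigned long addr)
{
        struct bug_entry *bug = find_bug(addr);
        const char *file = NULL;
        unsigned int line = 0;

        if (!bug)
                return;

        /* Yields NULL/0 when CONFIG_DEBUG_BUGVERBOSE is disabled. */
        bug_get_file_line(bug, &file, &line);
        if (file)
                pr_info("bug at %s:%u\n", file, line);
}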
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 5d474c626e24..5546bf588780 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -272,3 +272,4 @@ char *next_arg(char *args, char **param, char **val)
/* Chew up trailing spaces. */
return skip_spaces(args);
}
+EXPORT_SYMBOL(next_arg);
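Exporting next_arg() lets modules reuse the kernel's command-line tokenizer. A minimal sketch of walking a "key=value key2 ..." style string with it (hypothetical caller; note that next_arg() modifies the buffer in place, so it must be writable):

#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical parser: log every option found in a writable args buffer. */
static void dump_options(char *args)
{
        char *param, *val;

        args = skip_spaces(args);
        while (*args) {
                args = next_arg(args, &param, &val);
                pr_info("option '%s'%s%s\n",
                        param, val ? " = " : "", val ? val : "");
        }
}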
diff --git a/lib/crc8.c b/lib/crc8.c
index 595a5a75e3cd..1ad8e501d9b6 100644
--- a/lib/crc8.c
+++ b/lib/crc8.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(crc8_populate_lsb);
* @nbytes: number of bytes in data buffer.
* @crc: previous returned crc8 value.
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc)
{
/* loop over the buffer data */
while (nbytes-- > 0)
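With the const qualifier on @pdata, crc8() can now be fed read-only buffers directly. A usage sketch assuming the table helpers from <linux/crc8.h> (the table name and the 0x07 polynomial are illustrative choices):

#include <linux/crc8.h>
#include <linux/types.h>

DECLARE_CRC8_TABLE(foo_crc_table);              /* hypothetical table name */

static void foo_crc_init(void)
{
        /* Build the lookup table once, here for x^8 + x^2 + x + 1 (0x07). */
        crc8_populate_msb(foo_crc_table, 0x07);
}

static u8 foo_crc(const u8 *buf, size_t len)
{
        return crc8(foo_crc_table, buf, len, CRC8_INIT_VALUE);
}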
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 1cf409ef8d04..20a858031f12 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -391,7 +391,7 @@ static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
struct cstate *cst, uint16_t *p,
int pos_state, uint16_t *prob) {
- int offset;
+ int offset;
uint16_t *prob_len;
int num_bits;
int len;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c70d6347afa2..921d0a654243 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
/* tail :$info is function or line-range */
fline = strchr(query->filename, ':');
if (!fline)
- break;
+ continue;
*fline++ = '\0';
if (isalpha(*fline) || *fline == '*' || *fline == '?') {
/* take as function name */
diff --git a/lib/find_bit.c b/lib/find_bit.c
index f67f86fd2f62..0f8e2e369b1d 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -29,7 +29,7 @@
* searching it for one bits.
* - The optional "addr2", which is anded with "addr1" if present.
*/
-static unsigned long _find_next_bit(const unsigned long *addr1,
+unsigned long _find_next_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le)
{
@@ -68,44 +68,14 @@ static unsigned long _find_next_bit(const unsigned long *addr1,
return min(start + __ffs(tmp), nbits);
}
-#endif
-
-#ifndef find_next_bit
-/*
- * Find the next set bit in a memory region.
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
-}
-EXPORT_SYMBOL(find_next_bit);
-#endif
-
-#ifndef find_next_zero_bit
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
-#endif
-
-#if !defined(find_next_and_bit)
-unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset)
-{
- return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
-}
-EXPORT_SYMBOL(find_next_and_bit);
+EXPORT_SYMBOL(_find_next_bit);
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
@@ -116,14 +86,14 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
return size;
}
-EXPORT_SYMBOL(find_first_bit);
+EXPORT_SYMBOL(_find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
@@ -134,11 +104,11 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
return size;
}
-EXPORT_SYMBOL(find_first_zero_bit);
+EXPORT_SYMBOL(_find_first_zero_bit);
#endif
#ifndef find_last_bit
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
{
if (size) {
unsigned long val = BITMAP_LAST_WORD_MASK(size);
@@ -154,31 +124,9 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
}
return size;
}
-EXPORT_SYMBOL(find_last_bit);
-#endif
-
-#ifdef __BIG_ENDIAN
-
-#ifndef find_next_zero_bit_le
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
-}
-EXPORT_SYMBOL(find_next_zero_bit_le);
-#endif
-
-#ifndef find_next_bit_le
-unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
-}
-EXPORT_SYMBOL(find_next_bit_le);
+EXPORT_SYMBOL(_find_last_bit);
#endif
-#endif /* __BIG_ENDIAN */
-
unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
unsigned long size, unsigned long offset)
{
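Exporting _find_next_bit() (and the _find_first*/_find_last_bit() variants) lets the former out-of-line wrappers move into the header as inlines; the deleted find_next_bit() above shows the exact call they forward to. A sketch of what such a header-side wrapper boils down to (simplified; the real header additionally special-cases small, compile-time-constant sizes):

/* Header-side wrapper: same forwarding as the removed lib/find_bit.c copy. */
static inline unsigned long find_next_bit(const unsigned long *addr,
                                          unsigned long size,
                                          unsigned long offset)
{
        return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}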
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5dcf9cdcbc46..9a57257988c7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -642,6 +642,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -660,6 +661,7 @@ EXPORT_SYMBOL(gen_pool_first_fit);
* @nr: The number of zeroed bits we're looking for
* @data: data for alignment
* @pool: pool to get order from
+ * @start_addr: start addr of alloction chunk
*/
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -687,6 +689,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
* @nr: The number of zeroed bits we're looking for
* @data: data for alignment
* @pool: pool to get order from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -721,6 +724,7 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
@@ -735,13 +739,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
/**
* gen_pool_best_fit - find the best fitting region of memory
- * macthing the size requirement (no alignment constraint)
+ * matching the size requirement (no alignment constraint)
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*
* Iterate over the bitmap to find the smallest free region
* which we can allocate the memory.
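The kernel-doc fixes above cover the gen_pool allocation algorithms; for orientation, a sketch of how a pool is pointed at one of them (hypothetical helper, assuming the existing gen_pool_set_algo() and struct genpool_data_align APIs):

#include <linux/genalloc.h>

/* Hypothetical: make @pool hand out 256-byte aligned regions. */
static struct genpool_data_align foo_align_data = { .align = 256 };

static void foo_set_pool_alignment(struct gen_pool *pool)
{
        gen_pool_set_algo(pool, gen_pool_first_fit_align, &foo_align_data);
}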
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 61228a6c69f8..c701b7a187f2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -5,6 +5,7 @@
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
@@ -507,13 +508,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static void memzero_page(struct page *page, size_t offset, size_t len)
-{
- char *addr = kmap_atomic(page);
- memset(addr + offset, 0, len);
- kunmap_atomic(addr);
-}
-
static inline bool allocated(struct pipe_buffer *buf)
{
return buf->ops == &default_pipe_buf_ops;
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a926d96ffd44..1e1e37762799 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -137,7 +137,7 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
*
*
* The merging is controlled by "count", the number of elements in the
- * pending lists. This is beautiully simple code, but rather subtle.
+ * pending lists. This is beautifully simple code, but rather subtle.
*
* Each time we increment "count", we set one bit (bit k) and clear
* bits k-1 .. 0. Each time this happens (except the very first time
diff --git a/lib/parser.c b/lib/parser.c
index 7a5769db389f..f1a6d90b8c34 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -98,7 +98,7 @@ static int match_one(char *s, const char *p, substring_t args[])
* locations.
*
* Description: Detects which if any of a set of token strings has been passed
- * to it. Tokens can include up to MAX_OPT_ARGS instances of basic c-style
+ * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic c-style
* format identifiers which will be taken into account when matching the
* tokens, and whose locations will be returned in the @args array.
*/
@@ -120,8 +120,10 @@ EXPORT_SYMBOL(match_token);
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
- * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
static int match_number(substring_t *s, int *result, int base)
{
@@ -153,8 +155,10 @@ static int match_number(substring_t *s, int *result, int base)
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
- * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
static int match_u64int(substring_t *s, u64 *result, int base)
{
@@ -178,9 +182,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
* @s: substring_t to be scanned
* @result: resulting integer on success
*
- * Description: Attempts to parse the &substring_t @s as a decimal integer. On
- * success, sets @result to the integer represented by the string and returns 0.
- * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_int(substring_t *s, int *result)
{
@@ -188,14 +193,15 @@ int match_int(substring_t *s, int *result)
}
EXPORT_SYMBOL(match_int);
-/*
+/**
* match_uint - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
- * Description: Attempts to parse the &substring_t @s as a decimal integer. On
- * success, sets @result to the integer represented by the string and returns 0.
- * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_uint(substring_t *s, unsigned int *result)
{
@@ -217,9 +223,10 @@ EXPORT_SYMBOL(match_uint);
* @result: resulting unsigned long long on success
*
* Description: Attempts to parse the &substring_t @s as a long decimal
- * integer. On success, sets @result to the integer represented by the
- * string and returns 0.
- * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_u64(substring_t *s, u64 *result)
{
@@ -232,9 +239,10 @@ EXPORT_SYMBOL(match_u64);
* @s: substring_t to be scanned
* @result: resulting integer on success
*
- * Description: Attempts to parse the &substring_t @s as an octal integer. On
- * success, sets @result to the integer represented by the string and returns
- * 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * Description: Attempts to parse the &substring_t @s as an octal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_octal(substring_t *s, int *result)
{
@@ -248,8 +256,9 @@ EXPORT_SYMBOL(match_octal);
* @result: resulting integer on success
*
* Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
- * On success, sets @result to the integer represented by the string and
- * returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
int match_hex(substring_t *s, int *result)
{
@@ -263,10 +272,11 @@ EXPORT_SYMBOL(match_hex);
* @str: the string to be parsed
*
* Description: Parse the string @str to check if matches wildcard
- * pattern @pattern. The pattern may contain two type wildcardes:
+ * pattern @pattern. The pattern may contain two types of wildcards:
* '*' - matches zero or more characters
* '?' - matches one character
- * If it's matched, return true, else return false.
+ *
+ * Return: If the @str matches the @pattern, return true, else return false.
*/
bool match_wildcard(const char *pattern, const char *str)
{
@@ -316,7 +326,9 @@ EXPORT_SYMBOL(match_wildcard);
*
* Description: Copy the characters in &substring_t @src to the
* c-style string @dest. Copy no more than @size - 1 characters, plus
- * the terminating NUL. Return length of @src.
+ * the terminating NUL.
+ *
+ * Return: length of @src.
*/
size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
@@ -338,6 +350,9 @@ EXPORT_SYMBOL(match_strlcpy);
* Description: Allocates and returns a string filled with the contents of
* the &substring_t @s. The caller is responsible for freeing the returned
* string with kfree().
+ *
+ * Return: the address of the newly allocated NUL-terminated string or
+ * %NULL on error.
*/
char *match_strdup(const substring_t *s)
{
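The reworked kernel-doc above spells out the match_*() return conventions; a short usage sketch tying match_token() and match_int() together (hypothetical option table and parser):

#include <linux/errno.h>
#include <linux/parser.h>

enum { Opt_size, Opt_err };

static const match_table_t foo_tokens = {       /* hypothetical table */
        { Opt_size, "size=%d" },
        { Opt_err,  NULL }
};

/* Parse a single "size=<n>" option; returns 0 or a negative errno. */
static int foo_parse_option(char *opt, int *size)
{
        substring_t args[MAX_OPT_ARGS];

        if (match_token(opt, foo_tokens, args) != Opt_size)
                return -EINVAL;

        return match_int(&args[0], size);
}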
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 00f666d94486..ed610b75dc32 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
-/**
+/*
* This function is both preempt and irq safe. The former is due to explicit
* preemption disable. The latter is guaranteed by the fact that the slow path
* is explicitly protected by an irq-safe spinlock whereas the fast patch uses
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 49f67a0c6e5d..df9179f4f441 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -71,7 +71,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
-static DEFINE_SPINLOCK(depot_lock);
+static DEFINE_RAW_SPINLOCK(depot_lock);
static bool init_stack_slab(void **prealloc)
{
@@ -305,7 +305,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
prealloc = page_address(page);
}
- spin_lock_irqsave(&depot_lock, flags);
+ raw_spin_lock_irqsave(&depot_lock, flags);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
@@ -329,7 +329,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
WARN_ON(!init_stack_slab(&prealloc));
}
- spin_unlock_irqrestore(&depot_lock, flags);
+ raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */
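The switch to a raw spinlock matters because stack_depot_save() can be reached from contexts where sleeping locks are not allowed on PREEMPT_RT. For reference, the usual call pattern looks roughly like this (hypothetical helper; stack_trace_save() and stack_depot_save() are existing APIs):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Hypothetical: record the current call chain in the stack depot. */
static depot_stack_handle_t foo_save_stack(gfp_t flags)
{
        unsigned long entries[16];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr, flags);
}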
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 785e724ce0d8..dc05cfc2d12f 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test)
multishot = kasan_save_enable_multi_shot();
kasan_set_tagging_report_once(false);
+ fail_data.report_found = false;
+ fail_data.report_expected = false;
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "kasan_data", &fail_data);
return 0;
}
@@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test)
{
kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
+ KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}
/**
@@ -78,33 +83,31 @@ static void kasan_test_exit(struct kunit *test)
* fields, it can reorder or optimize away the accesses to those fields.
* Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
* expression to prevent that.
+ *
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
+ * false. This allows detecting KASAN reports that happen outside of the checks
+ * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
+ * and in kasan_test_exit.
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) \
- migrate_disable(); \
- WRITE_ONCE(fail_data.report_expected, true); \
- WRITE_ONCE(fail_data.report_found, false); \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- barrier(); \
- expression; \
- barrier(); \
- if (kasan_async_mode_enabled()) \
- kasan_force_async_fault(); \
- barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) { \
- if (READ_ONCE(fail_data.report_found)) \
- kasan_enable_tagging_sync(); \
- migrate_enable(); \
- } \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ !kasan_async_mode_enabled()) \
+ migrate_disable(); \
+ KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
+ WRITE_ONCE(fail_data.report_expected, true); \
+ barrier(); \
+ expression; \
+ barrier(); \
+ KUNIT_EXPECT_EQ(test, \
+ READ_ONCE(fail_data.report_expected), \
+ READ_ONCE(fail_data.report_found)); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
+ if (READ_ONCE(fail_data.report_found)) \
+ kasan_enable_tagging_sync(); \
+ migrate_enable(); \
+ } \
+ WRITE_ONCE(fail_data.report_found, false); \
+ WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
@@ -1049,14 +1052,14 @@ static void match_all_mem_tag(struct kunit *test)
continue;
/* Mark the first memory granule with the chosen memory tag. */
- kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
/* This access must cause a KASAN report. */
KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
}
/* Recover the memory tag and free. */
- kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
kfree(ptr);
}
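With the resource setup moved into kasan_test_init() and the report bookkeeping reset at the end of the macro, a test case only has to wrap the faulting expression. A sketch modeled on the existing cases in this file (hypothetical test name; relies on the file's existing includes):

/* Hypothetical KUnit case: one out-of-bounds write, expected to be caught. */
static void kmalloc_oob_example(struct kunit *test)
{
        char *ptr;
        size_t size = 16;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

        kfree(ptr);
}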
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 5cf2fe9aab9e..01e9543de566 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -23,8 +23,8 @@
module_param(name, type, 0444); \
MODULE_PARM_DESC(name, msg) \
-__param(bool, single_cpu_test, false,
- "Use single first online CPU to run tests");
+__param(int, nr_threads, 0,
+ "Number of workers to perform tests(min: 1 max: USHRT_MAX)");
__param(bool, sequential_test_order, false,
"Use sequential stress tests order");
@@ -47,19 +47,10 @@ __param(int, run_test_mask, INT_MAX,
"\t\tid: 128, name: pcpu_alloc_test\n"
"\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
"\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
- "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n"
- "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n"
/* Add a new test case description here. */
);
/*
- * Depends on single_cpu_test parameter. If it is true, then
- * use first online CPU to trigger a test on, otherwise go with
- * all online CPUs.
- */
-static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
-
-/*
* Read write semaphore for synchronization of setup
* phase that is done in main thread and workers.
*/
@@ -363,42 +354,6 @@ kvfree_rcu_2_arg_vmalloc_test(void)
return 0;
}
-static int
-kvfree_rcu_1_arg_slab_test(void)
-{
- struct test_kvfree_rcu *p;
- int i;
-
- for (i = 0; i < test_loop_count; i++) {
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -1;
-
- p->array[0] = 'a';
- kvfree_rcu(p);
- }
-
- return 0;
-}
-
-static int
-kvfree_rcu_2_arg_slab_test(void)
-{
- struct test_kvfree_rcu *p;
- int i;
-
- for (i = 0; i < test_loop_count; i++) {
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -1;
-
- p->array[0] = 'a';
- kvfree_rcu(p, rcu);
- }
-
- return 0;
-}
-
struct test_case_desc {
const char *test_name;
int (*test_func)(void);
@@ -415,8 +370,6 @@ static struct test_case_desc test_case_array[] = {
{ "pcpu_alloc_test", pcpu_alloc_test },
{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
- { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test },
- { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test },
/* Add a new test case here. */
};
@@ -426,16 +379,13 @@ struct test_case_data {
u64 time;
};
-/* Split it to get rid of: WARNING: line over 80 characters */
-static struct test_case_data
- per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
-
static struct test_driver {
struct task_struct *task;
+ struct test_case_data data[ARRAY_SIZE(test_case_array)];
+
unsigned long start;
unsigned long stop;
- int cpu;
-} per_cpu_test_driver[NR_CPUS];
+} *tdriver;
static void shuffle_array(int *arr, int n)
{
@@ -463,9 +413,6 @@ static int test_func(void *private)
ktime_t kt;
u64 delta;
- if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
- pr_err("Failed to set affinity to %d CPU\n", t->cpu);
-
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
@@ -490,9 +437,9 @@ static int test_func(void *private)
kt = ktime_get();
for (j = 0; j < test_repeat_count; j++) {
if (!test_case_array[index].test_func())
- per_cpu_test_data[t->cpu][index].test_passed++;
+ t->data[index].test_passed++;
else
- per_cpu_test_data[t->cpu][index].test_failed++;
+ t->data[index].test_failed++;
}
/*
@@ -501,7 +448,7 @@ static int test_func(void *private)
delta = (u64) ktime_us_delta(ktime_get(), kt);
do_div(delta, (u32) test_repeat_count);
- per_cpu_test_data[t->cpu][index].time = delta;
+ t->data[index].time = delta;
}
t->stop = get_cycles();
@@ -517,53 +464,56 @@ static int test_func(void *private)
return 0;
}
-static void
+static int
init_test_configurtion(void)
{
/*
- * Reset all data of all CPUs.
+ * A maximum number of workers is defined as hard-coded
+ * value and set to USHRT_MAX. We add such gap just in
+ * case and for potential heavy stressing.
*/
- memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
- if (single_cpu_test)
- cpumask_set_cpu(cpumask_first(cpu_online_mask),
- &cpus_run_test_mask);
- else
- cpumask_and(&cpus_run_test_mask, cpu_online_mask,
- cpu_online_mask);
+ /* Allocate the space for test instances. */
+ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+ if (tdriver == NULL)
+ return -1;
if (test_repeat_count <= 0)
test_repeat_count = 1;
if (test_loop_count <= 0)
test_loop_count = 1;
+
+ return 0;
}
static void do_concurrent_test(void)
{
- int cpu, ret;
+ int i, ret;
/*
* Set some basic configurations plus sanity check.
*/
- init_test_configurtion();
+ ret = init_test_configurtion();
+ if (ret < 0)
+ return;
/*
* Put on hold all workers.
*/
down_write(&prepare_for_test_rwsem);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
- t->cpu = cpu;
- t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
if (!IS_ERR(t->task))
/* Success. */
atomic_inc(&test_n_undone);
else
- pr_err("Failed to start kthread for %d CPU\n", cpu);
+ pr_err("Failed to start %d kthread\n", i);
}
/*
@@ -581,29 +531,31 @@ static void do_concurrent_test(void)
ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
} while (!ret);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
- int i;
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
+ int j;
if (!IS_ERR(t->task))
kthread_stop(t->task);
- for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
- if (!((run_test_mask & (1 << i)) >> i))
+ for (j = 0; j < ARRAY_SIZE(test_case_array); j++) {
+ if (!((run_test_mask & (1 << j)) >> j))
continue;
pr_info(
"Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
- test_case_array[i].test_name,
- per_cpu_test_data[cpu][i].test_passed,
- per_cpu_test_data[cpu][i].test_failed,
+ test_case_array[j].test_name,
+ t->data[j].test_passed,
+ t->data[j].test_failed,
test_repeat_count, test_loop_count,
- per_cpu_test_data[cpu][i].time);
+ t->data[j].time);
}
- pr_info("All test took CPU%d=%lu cycles\n",
- cpu, t->stop - t->start);
+ pr_info("All test took worker%d=%lu cycles\n",
+ i, t->stop - t->start);
}
+
+ kvfree(tdriver);
}
static int vmalloc_test_init(void)
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2919f1698140..ce2f69552003 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -46,16 +46,18 @@ static inline bool vdso_cycles_ok(u64 cycles)
#endif
#ifdef CONFIG_TIME_NS
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data();
+ const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
u64 cycles, last, ns;
u32 seq;
s64 sec;
+ vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
+ vd = __arch_get_timens_vdso_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
vd = &vd[CS_HRES_COARSE];
else
@@ -92,13 +94,14 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
return 0;
}
#else
-static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return NULL;
}
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
return -EINVAL;
}
@@ -159,10 +162,10 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}
#ifdef CONFIG_TIME_NS
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data();
+ const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
const struct timens_offset *offs = &vdns->offset[clk];
u64 nsec;
@@ -188,8 +191,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
return 0;
}
#else
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
return -1;
}
@@ -310,7 +313,7 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
@@ -333,7 +336,7 @@ __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -363,7 +366,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which