Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug             |  19
-rw-r--r--  lib/alloc_tag.c               |  34
-rw-r--r--  lib/errseq.c                  |  13
-rw-r--r--  lib/iov_iter.c                |  30
-rw-r--r--  lib/kstrtox.c                 |   4
-rw-r--r--  lib/llist.c                   |  22
-rw-r--r--  lib/maple_tree.c              | 191
-rw-r--r--  lib/oid_registry.c            |  25
-rw-r--r--  lib/raid6/algos.c             |   6
-rw-r--r--  lib/raid6/avx512.c            |   4
-rw-r--r--  lib/raid6/recov_avx512.c      |   6
-rw-r--r--  lib/raid6/test/Makefile       |   3
-rw-r--r--  lib/rbtree.c                  |   8
-rw-r--r--  lib/scatterlist.c             |  23
-rw-r--r--  lib/test_fortify/Makefile     |   5
-rw-r--r--  lib/test_kmod.c               |  64
-rw-r--r--  lib/test_vmalloc.c            |  22
-rw-r--r--  lib/test_xarray.c             |  17
-rw-r--r--  lib/tests/stackinit_kunit.c   |  10
-rw-r--r--  lib/xarray.c                  |   9
20 files changed, 230 insertions, 285 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6479cec900c7..ebe33181b6e6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2153,18 +2153,12 @@ config ARCH_HAS_KCOV
build and run with CONFIG_KCOV. This typically requires
disabling instrumentation for some early boot code.
-config CC_HAS_SANCOV_TRACE_PC
- def_bool $(cc-option,-fsanitize-coverage=trace-pc)
-
-
config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
- depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
- select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
help
KCOV exposes kernel code coverage information in a form suitable
@@ -2574,8 +2568,7 @@ config TEST_BITOPS
config TEST_VMALLOC
tristate "Test module for stress/performance analysis of vmalloc allocator"
default n
- depends on MMU
- depends on m
+ depends on MMU
help
This builds the "test_vmalloc" module that should be used for
stress and performance analysis. So, any new change for vmalloc
@@ -2878,9 +2871,7 @@ config STACKINIT_KUNIT_TEST
help
Test if the kernel is zero-initializing stack variables and
padding. Coverage is controlled by compiler flags,
- CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
- CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
- or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+ CONFIG_INIT_STACK_ALL_PATTERN or CONFIG_INIT_STACK_ALL_ZERO.
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
@@ -2991,13 +2982,7 @@ config TEST_DYNAMIC_DEBUG
config TEST_KMOD
tristate "kmod stress tester"
depends on m
- depends on NETDEVICES && NET_CORE && INET # for TUN
- depends on BLOCK
- depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
select TEST_LKM
- select XFS_FS
- select TUN
- select BTRFS_FS
help
Test the kernel's module loading mechanism: kmod. kmod implements
support to load modules using the Linux kernel's usermode helper.
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index c7f602fa7b23..45dae7da70e1 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -244,17 +244,6 @@ static void shutdown_mem_profiling(bool remove_file)
mem_profiling_support = false;
}
-static void __init procfs_init(void)
-{
- if (!mem_profiling_support)
- return;
-
- if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
- pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
- shutdown_mem_profiling(false);
- }
-}
-
void __init alloc_tag_sec_init(void)
{
struct alloc_tag *last_codetag;
@@ -813,19 +802,34 @@ static int __init alloc_tag_init(void)
};
int res;
+ sysctl_init();
+
+ if (!mem_profiling_support) {
+ pr_info("Memory allocation profiling is not supported!\n");
+ return 0;
+ }
+
+ if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
+ pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
+ shutdown_mem_profiling(false);
+ return -ENOMEM;
+ }
+
res = alloc_mod_tags_mem();
- if (res)
+ if (res) {
+ pr_err("Failed to reserve address space for module tags, errno = %d\n", res);
+ shutdown_mem_profiling(true);
return res;
+ }
alloc_tag_cttype = codetag_register_type(&desc);
if (IS_ERR(alloc_tag_cttype)) {
+ pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype));
free_mod_tags_mem();
+ shutdown_mem_profiling(true);
return PTR_ERR(alloc_tag_cttype);
}
- sysctl_init();
- procfs_init();
-
return 0;
}
module_init(alloc_tag_init);
diff --git a/lib/errseq.c b/lib/errseq.c
index 93e9b94358dc..13a2581c5a87 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -34,11 +34,14 @@
*/
/* The low bits are designated for error code (max of MAX_ERRNO) */
-#define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1)
+#define ERRSEQ_SHIFT (ilog2(MAX_ERRNO) + 1)
/* This bit is used as a flag to indicate whether the value has been seen */
#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
+/* Leverage macro ERRSEQ_SEEN to define errno mask macro here */
+#define ERRNO_MASK (ERRSEQ_SEEN - 1)
+
/* The lowest bit of the counter */
#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
@@ -60,8 +63,6 @@ errseq_t errseq_set(errseq_t *eseq, int err)
{
errseq_t cur, old;
- /* MAX_ERRNO must be able to serve as a mask */
- BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
/*
* Ensure the error code actually fits where we want it to go. If it
@@ -79,7 +80,7 @@ errseq_t errseq_set(errseq_t *eseq, int err)
errseq_t new;
/* Clear out error bits and set new error */
- new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+ new = (old & ~(ERRNO_MASK | ERRSEQ_SEEN)) | -err;
/* Only increment if someone has looked at it */
if (old & ERRSEQ_SEEN)
@@ -148,7 +149,7 @@ int errseq_check(errseq_t *eseq, errseq_t since)
if (likely(cur == since))
return 0;
- return -(cur & MAX_ERRNO);
+ return -(cur & ERRNO_MASK);
}
EXPORT_SYMBOL(errseq_check);
@@ -200,7 +201,7 @@ int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
if (new != old)
cmpxchg(eseq, old, new);
*since = new;
- err = -(new & MAX_ERRNO);
+ err = -(new & ERRNO_MASK);
}
return err;
}
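
For reference, a minimal userspace sketch of the bit layout that the new ERRNO_MASK encodes (illustrative only, not part of the patch; it assumes MAX_ERRNO = 4095 as defined in include/linux/err.h). Deriving the mask from ERRSEQ_SEEN is what allows the BUILD_BUG_ON_NOT_POWER_OF_2() check to be dropped above: the mask stays correct even if MAX_ERRNO + 1 is not a power of two.

/* Illustrative userspace sketch, not kernel code; assumes MAX_ERRNO = 4095. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ILOG2(x)	(31 - __builtin_clz(x))		/* stand-in for the kernel's ilog2() */
#define ERRSEQ_SHIFT	(ILOG2(MAX_ERRNO) + 1)		/* 12 */
#define ERRSEQ_SEEN	(1u << ERRSEQ_SHIFT)		/* 0x1000 */
#define ERRNO_MASK	(ERRSEQ_SEEN - 1)		/* 0x0fff */
#define ERRSEQ_CTR_INC	(1u << (ERRSEQ_SHIFT + 1))	/* counter occupies the bits above SEEN */

int main(void)
{
	unsigned int eseq = ERRSEQ_SEEN | ERRSEQ_CTR_INC;	/* pretend a prior error was seen */

	/* Record -EIO (5): clear the old errno and SEEN bits, keep the counter. */
	eseq = (eseq & ~(ERRNO_MASK | ERRSEQ_SEEN)) | 5;
	printf("errno = -%u, seen = %d, counter bits = %#x\n",
	       eseq & ERRNO_MASK, !!(eseq & ERRSEQ_SEEN),
	       eseq & ~(ERRNO_MASK | ERRSEQ_SEEN));
	return 0;
}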
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index bc9391e55d57..d9e19fb2dcf3 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1059,22 +1059,22 @@ static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa
pgoff_t index, unsigned int nr_pages)
{
XA_STATE(xas, xa, index);
- struct page *page;
+ struct folio *folio;
unsigned int ret = 0;
rcu_read_lock();
- for (page = xas_load(&xas); page; page = xas_next(&xas)) {
- if (xas_retry(&xas, page))
+ for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+ if (xas_retry(&xas, folio))
continue;
- /* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
+ /* Has the folio moved or been split? */
+ if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
- pages[ret] = find_subpage(page, xas.xa_index);
- get_page(pages[ret]);
+ pages[ret] = folio_file_page(folio, xas.xa_index);
+ folio_get(folio);
if (++ret == nr_pages)
break;
}
@@ -1650,11 +1650,11 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
- struct page *page, **p;
+ struct page **p;
+ struct folio *folio;
unsigned int nr = 0, offset;
loff_t pos = i->xarray_start + i->iov_offset;
- pgoff_t index = pos >> PAGE_SHIFT;
- XA_STATE(xas, i->xarray, index);
+ XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
offset = pos & ~PAGE_MASK;
*offset0 = offset;
@@ -1665,17 +1665,17 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
p = *pages;
rcu_read_lock();
- for (page = xas_load(&xas); page; page = xas_next(&xas)) {
- if (xas_retry(&xas, page))
+ for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+ if (xas_retry(&xas, folio))
continue;
- /* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
+ /* Has the folio moved or been split? */
+ if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
- p[nr++] = find_subpage(page, xas.xa_index);
+ p[nr++] = folio_file_page(folio, xas.xa_index);
if (nr == maxpages)
break;
}
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index d586e6af5e5a..bdde40cd69d7 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -351,6 +351,8 @@ int kstrtobool(const char *s, bool *res)
return -EINVAL;
switch (s[0]) {
+ case 'e':
+ case 'E':
case 'y':
case 'Y':
case 't':
@@ -358,6 +360,8 @@ int kstrtobool(const char *s, bool *res)
case '1':
*res = true;
return 0;
+ case 'd':
+ case 'D':
case 'n':
case 'N':
case 'f':
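
With the two new cases, kstrtobool() now also maps strings starting with 'e'/'E' to true and 'd'/'D' to false, so "enable"/"disable" parse the same way as "yes"/"no", "true"/"false", "on"/"off" and "1"/"0". A hypothetical sysfs store handler shows the kind of call site that benefits (feature_store() and feature_set() are placeholders, not from this patch):

/* Hypothetical usage sketch; feature_set() is a made-up driver helper. */
static ssize_t feature_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	bool enable;
	int ret;

	ret = kstrtobool(buf, &enable);		/* now also accepts "enable"/"disable" */
	if (ret)
		return ret;

	feature_set(dev, enable);
	return count;
}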
diff --git a/lib/llist.c b/lib/llist.c
index f21d0cfbbaaa..f574c17a238e 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -14,28 +14,6 @@
#include <linux/export.h>
#include <linux/llist.h>
-
-/**
- * llist_add_batch - add several linked entries in batch
- * @new_first: first entry in batch to be added
- * @new_last: last entry in batch to be added
- * @head: the head for your lock-less list
- *
- * Return whether list is empty before adding.
- */
-bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head)
-{
- struct llist_node *first = READ_ONCE(head->first);
-
- do {
- new_last->next = first;
- } while (!try_cmpxchg(&head->first, &first, new_first));
-
- return !first;
-}
-EXPORT_SYMBOL_GPL(llist_add_batch);
-
/**
* llist_del_first - delete the first entry of lock-less list
* @head: the head for your lock-less list
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d0bea23fa4bc..affe979bd14d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -211,14 +211,14 @@ static void ma_free_rcu(struct maple_node *node)
call_rcu(&node->rcu, mt_free_rcu);
}
-static void mas_set_height(struct ma_state *mas)
+static void mt_set_height(struct maple_tree *mt, unsigned char height)
{
- unsigned int new_flags = mas->tree->ma_flags;
+ unsigned int new_flags = mt->ma_flags;
new_flags &= ~MT_FLAGS_HEIGHT_MASK;
- MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
- new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
- mas->tree->ma_flags = new_flags;
+ MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX);
+ new_flags |= height << MT_FLAGS_HEIGHT_OFFSET;
+ mt->ma_flags = new_flags;
}
static unsigned int mas_mt_height(struct ma_state *mas)
@@ -1371,7 +1371,7 @@ retry:
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
- mas->depth = 1;
+ mas->depth = 0;
mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
@@ -1712,9 +1712,10 @@ static inline void mas_adopt_children(struct ma_state *mas,
* node as dead.
* @mas: the maple state with the new node
* @old_enode: The old maple encoded node to replace.
+ * @new_height: if we are inserting a root node, update the height of the tree
*/
static inline void mas_put_in_tree(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, char new_height)
__must_hold(mas->tree->ma_lock)
{
unsigned char offset;
@@ -1723,7 +1724,7 @@ static inline void mas_put_in_tree(struct ma_state *mas,
if (mte_is_root(mas->node)) {
mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
- mas_set_height(mas);
+ mt_set_height(mas->tree, new_height);
} else {
offset = mte_parent_slot(mas->node);
@@ -1741,12 +1742,13 @@ static inline void mas_put_in_tree(struct ma_state *mas,
* the parent encoding to locate the maple node in the tree.
* @mas: the ma_state with @mas->node pointing to the new node.
* @old_enode: The old maple encoded node.
+ * @new_height: The new height of the tree as a result of the operation
*/
static inline void mas_replace_node(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
__must_hold(mas->tree->ma_lock)
{
- mas_put_in_tree(mas, old_enode);
+ mas_put_in_tree(mas, old_enode, new_height);
mas_free(mas, old_enode);
}
@@ -2536,10 +2538,11 @@ static inline void mas_topiary_node(struct ma_state *mas,
*
* @mas: The maple state pointing at the new data
* @old_enode: The maple encoded node being replaced
+ * @new_height: The new height of the tree as a result of the operation
*
*/
static inline void mas_topiary_replace(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
{
struct ma_state tmp[3], tmp_next[3];
MA_TOPIARY(subtrees, mas->tree);
@@ -2547,7 +2550,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
int i, n;
/* Place data in tree & then mark node as old */
- mas_put_in_tree(mas, old_enode);
+ mas_put_in_tree(mas, old_enode, new_height);
/* Update the parent pointers in the tree */
tmp[0] = *mas;
@@ -2631,14 +2634,15 @@ static inline void mas_topiary_replace(struct ma_state *mas,
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
* @old_enode: The old maple encoded node that is being replaced.
+ * @new_height: The new height of the tree as a result of the operation
*
* Updates gap as necessary.
*/
static inline void mas_wmb_replace(struct ma_state *mas,
- struct maple_enode *old_enode)
+ struct maple_enode *old_enode, unsigned char new_height)
{
/* Insert the new data in the tree */
- mas_topiary_replace(mas, old_enode);
+ mas_topiary_replace(mas, old_enode, new_height);
if (mte_is_leaf(mas->node))
return;
@@ -2737,7 +2741,7 @@ static inline bool mast_sufficient(struct maple_subtree_state *mast)
*/
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
- if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
+ if (mast->bn->b_end > mt_slot_count(mast->orig_l->node))
return true;
return false;
@@ -2824,6 +2828,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
{
unsigned char split, mid_split;
unsigned char slot = 0;
+ unsigned char new_height = 0; /* used if node is a new root */
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
struct maple_enode *old_enode;
@@ -2845,8 +2850,6 @@ static void mas_spanning_rebalance(struct ma_state *mas,
unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
mast_spanning_rebalance(mast);
- l_mas.depth = 0;
-
/*
* Each level of the tree is examined and balanced, pushing data to the left or
* right, or rebalancing against left or right nodes is employed to avoid
@@ -2866,6 +2869,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+ new_height++;
/*
* Copy data from next level in the tree to mast->bn from next
@@ -2873,7 +2877,6 @@ static void mas_spanning_rebalance(struct ma_state *mas,
*/
memset(mast->bn, 0, sizeof(struct maple_big_node));
mast->bn->type = mte_node_type(left);
- l_mas.depth++;
/* Root already stored in l->node. */
if (mas_is_root_limits(mast->l))
@@ -2890,11 +2893,21 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast_combine_cp_right(mast);
mast->orig_l->last = mast->orig_l->max;
- if (mast_sufficient(mast))
- continue;
+ if (mast_sufficient(mast)) {
+ if (mast_overflow(mast))
+ continue;
+
+ if (mast->orig_l->node == mast->orig_r->node) {
+ /*
+ * The data in b_node should be stored in one
+ * node and in the tree
+ */
+ slot = mast->l->offset;
+ break;
+ }
- if (mast_overflow(mast))
continue;
+ }
/* May be a new root stored in mast->bn */
if (mas_is_root_limits(mast->orig_l))
@@ -2909,8 +2922,9 @@ static void mas_spanning_rebalance(struct ma_state *mas,
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
- l_mas.depth++;
+
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
+ new_height++;
mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
mas_set_parent(mas, middle, l_mas.node, ++slot);
@@ -2933,7 +2947,7 @@ new_root:
mas->min = l_mas.min;
mas->max = l_mas.max;
mas->offset = l_mas.offset;
- mas_wmb_replace(mas, old_enode);
+ mas_wmb_replace(mas, old_enode, new_height);
mtree_range_walk(mas);
return;
}
@@ -3009,6 +3023,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
void __rcu **l_slots, **slots;
unsigned long *l_pivs, *pivs, gap;
bool in_rcu = mt_in_rcu(mas->tree);
+ unsigned char new_height = mas_mt_height(mas);
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
@@ -3103,7 +3118,7 @@ done:
mas_ascend(mas);
if (in_rcu) {
- mas_replace_node(mas, old_eparent);
+ mas_replace_node(mas, old_eparent, new_height);
mas_adopt_children(mas, mas->node);
}
@@ -3114,10 +3129,9 @@ done:
* mas_split_final_node() - Split the final node in a subtree operation.
* @mast: the maple subtree state
* @mas: The maple state
- * @height: The height of the tree in case it's a new root.
*/
static inline void mas_split_final_node(struct maple_subtree_state *mast,
- struct ma_state *mas, int height)
+ struct ma_state *mas)
{
struct maple_enode *ancestor;
@@ -3126,7 +3140,6 @@ static inline void mas_split_final_node(struct maple_subtree_state *mast,
mast->bn->type = maple_arange_64;
else
mast->bn->type = maple_range_64;
- mas->depth = height;
}
/*
* Only a single node is used here, could be root.
@@ -3214,7 +3227,6 @@ static inline void mast_split_data(struct maple_subtree_state *mast,
* mas_push_data() - Instead of splitting a node, it is beneficial to push the
* data to the right or left node if there is room.
* @mas: The maple state
- * @height: The current height of the maple state
* @mast: The maple subtree state
* @left: Push left or not.
*
@@ -3222,8 +3234,8 @@ static inline void mast_split_data(struct maple_subtree_state *mast,
*
* Return: True if pushed, false otherwise.
*/
-static inline bool mas_push_data(struct ma_state *mas, int height,
- struct maple_subtree_state *mast, bool left)
+static inline bool mas_push_data(struct ma_state *mas,
+ struct maple_subtree_state *mast, bool left)
{
unsigned char slot_total = mast->bn->b_end;
unsigned char end, space, split;
@@ -3280,7 +3292,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
mast_split_data(mast, mas, split);
mast_fill_bnode(mast, mas, 2);
- mas_split_final_node(mast, mas, height + 1);
+ mas_split_final_node(mast, mas);
return true;
}
@@ -3293,6 +3305,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
+ unsigned int orig_height = mas_mt_height(mas);
unsigned char mid_split, split = 0;
struct maple_enode *old;
@@ -3319,7 +3332,6 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(__func__, mas);
- mas->depth = mas_mt_height(mas);
mast.l = &l_mas;
mast.r = &r_mas;
@@ -3327,9 +3339,9 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mast.orig_r = &prev_r_mas;
mast.bn = b_node;
- while (height++ <= mas->depth) {
+ while (height++ <= orig_height) {
if (mt_slots[b_node->type] > b_node->b_end) {
- mas_split_final_node(&mast, mas, height);
+ mas_split_final_node(&mast, mas);
break;
}
@@ -3344,11 +3356,15 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
* is a significant savings.
*/
/* Try to push left. */
- if (mas_push_data(mas, height, &mast, true))
+ if (mas_push_data(mas, &mast, true)) {
+ height++;
break;
+ }
/* Try to push right. */
- if (mas_push_data(mas, height, &mast, false))
+ if (mas_push_data(mas, &mast, false)) {
+ height++;
break;
+ }
split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
@@ -3365,7 +3381,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
/* Set the original node as dead */
old = mas->node;
mas->node = l_mas.node;
- mas_wmb_replace(mas, old);
+ mas_wmb_replace(mas, old, height);
mtree_range_walk(mas);
return;
}
@@ -3424,8 +3440,7 @@ static inline void mas_root_expand(struct ma_state *mas, void *entry)
if (mas->last != ULONG_MAX)
pivots[++slot] = ULONG_MAX;
- mas->depth = 1;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 1);
ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
@@ -3532,6 +3547,16 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
if (ma_is_leaf(wr_mas->type))
return true;
+ if (mas->end < mt_slots[wr_mas->type] - 1)
+ wr_mas->vacant_height = mas->depth + 1;
+
+ if (ma_is_root(mas_mn(mas))) {
+ /* root needs more than 2 entries to be sufficient + 1 */
+ if (mas->end > 2)
+ wr_mas->sufficient_height = 1;
+ } else if (mas->end > mt_min_slots[wr_mas->type] + 1)
+ wr_mas->sufficient_height = mas->depth + 1;
+
mas_wr_walk_traverse(wr_mas);
}
@@ -3669,8 +3694,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
if (!entry) {
- mas->depth = 0;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 0);
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->status = ma_start;
goto done;
@@ -3684,8 +3708,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
- mas->depth = 1;
- mas_set_height(mas);
+ mt_set_height(mas->tree, 1);
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
done:
@@ -3804,6 +3827,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
struct maple_node reuse, *newnode;
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
+ unsigned char height = mas_mt_height(mas);
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
@@ -3860,7 +3884,7 @@ done:
struct maple_enode *old_enode = mas->node;
mas->node = mt_mk_node(newnode, wr_mas->type);
- mas_replace_node(mas, old_enode);
+ mas_replace_node(mas, old_enode, height);
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
@@ -4059,15 +4083,6 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
unsigned char new_end = mas_wr_new_end(wr_mas);
switch (mas->store_type) {
- case wr_invalid:
- MT_BUG_ON(mas->tree, 1);
- return;
- case wr_new_root:
- mas_new_root(mas, wr_mas->entry);
- break;
- case wr_store_root:
- mas_store_root(mas, wr_mas->entry);
- break;
case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
@@ -4089,6 +4104,14 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
case wr_rebalance:
mas_wr_bnode(wr_mas);
break;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
+ mas_store_root(mas, wr_mas->entry);
+ break;
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
}
return;
@@ -4140,18 +4163,41 @@ set_content:
/**
* mas_prealloc_calc() - Calculate number of nodes needed for a
* given store operation
- * @mas: The maple state
+ * @wr_mas: The maple write state
* @entry: The entry to store into the tree
*
* Return: Number of nodes required for preallocation.
*/
-static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
+static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
{
- int ret = mas_mt_height(mas) * 3 + 1;
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char height = mas_mt_height(mas);
+ int ret = height * 3 + 1;
+ unsigned char delta = height - wr_mas->vacant_height;
switch (mas->store_type) {
- case wr_invalid:
- WARN_ON_ONCE(1);
+ case wr_exact_fit:
+ case wr_append:
+ case wr_slot_store:
+ ret = 0;
+ break;
+ case wr_spanning_store:
+ if (wr_mas->sufficient_height < wr_mas->vacant_height)
+ ret = (height - wr_mas->sufficient_height) * 3 + 1;
+ else
+ ret = delta * 3 + 1;
+ break;
+ case wr_split_store:
+ ret = delta * 2 + 1;
+ break;
+ case wr_rebalance:
+ if (wr_mas->sufficient_height < wr_mas->vacant_height)
+ ret = (height - wr_mas->sufficient_height) * 2 + 1;
+ else
+ ret = delta * 2 + 1;
+ break;
+ case wr_node_store:
+ ret = mt_in_rcu(mas->tree) ? 1 : 0;
break;
case wr_new_root:
ret = 1;
@@ -4164,22 +4210,8 @@ static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
else
ret = 0;
break;
- case wr_spanning_store:
- ret = mas_mt_height(mas) * 3 + 1;
- break;
- case wr_split_store:
- ret = mas_mt_height(mas) * 2 + 1;
- break;
- case wr_rebalance:
- ret = mas_mt_height(mas) * 2 - 1;
- break;
- case wr_node_store:
- ret = mt_in_rcu(mas->tree) ? 1 : 0;
- break;
- case wr_append:
- case wr_exact_fit:
- case wr_slot_store:
- ret = 0;
+ case wr_invalid:
+ WARN_ON_ONCE(1);
}
return ret;
@@ -4243,16 +4275,15 @@ static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
*/
static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
{
- struct ma_state *mas = wr_mas->mas;
int request;
mas_wr_prealloc_setup(wr_mas);
- mas->store_type = mas_wr_store_type(wr_mas);
- request = mas_prealloc_calc(mas, entry);
+ wr_mas->mas->store_type = mas_wr_store_type(wr_mas);
+ request = mas_prealloc_calc(wr_mas, entry);
if (!request)
return;
- mas_node_count(mas, request);
+ mas_node_count(wr_mas->mas, request);
}
/**
@@ -5397,7 +5428,7 @@ void *mas_store(struct ma_state *mas, void *entry)
return wr_mas.content;
}
- request = mas_prealloc_calc(mas, entry);
+ request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
goto store;
@@ -5494,7 +5525,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
- request = mas_prealloc_calc(mas, entry);
+ request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
return ret;
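
The reworked mas_prealloc_calc() bounds the preallocation using the write walk's bookkeeping rather than the full tree height. A worked example with illustrative numbers (not taken from the patch):

/*
 * Worked example (illustrative numbers only): for a tree of height 4 where
 * the walk last saw a node with a free slot at level 2
 * (wr_mas->vacant_height == 2), a wr_split_store now preallocates
 *
 *	delta = height - vacant_height = 4 - 2 = 2
 *	ret   = delta * 2 + 1          = 5 nodes
 *
 * instead of the previous worst case of height * 2 + 1 = 9, since the split
 * stops once it reaches a node that still has room.
 */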
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index fe6705cfd780..9b757a117f09 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -117,7 +117,7 @@ int parse_OID(const void *data, size_t datasize, enum OID *oid)
EXPORT_SYMBOL_GPL(parse_OID);
/*
- * sprint_OID - Print an Object Identifier into a buffer
+ * sprint_oid - Print an Object Identifier into a buffer
* @data: The encoded OID to print
* @datasize: The size of the encoded OID
* @buffer: The buffer to render into
@@ -173,26 +173,3 @@ bad:
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(sprint_oid);
-
-/**
- * sprint_OID - Print an Object Identifier into a buffer
- * @oid: The OID to print
- * @buffer: The buffer to render into
- * @bufsize: The size of the buffer
- *
- * The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned.
- */
-int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
-{
- int ret;
-
- BUG_ON(oid >= OID__NR);
-
- ret = sprint_oid(oid_data + oid_index[oid],
- oid_index[oid + 1] - oid_index[oid],
- buffer, bufsize);
- BUG_ON(ret == -EBADMSG);
- return ret;
-}
-EXPORT_SYMBOL_GPL(sprint_OID);
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index cd2e88ee1f14..dfd3f800ac9b 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -28,10 +28,8 @@ EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
-#ifdef CONFIG_AS_AVX512
&raid6_avx512x2,
&raid6_avx512x1,
-#endif
&raid6_avx2x2,
&raid6_avx2x1,
&raid6_sse2x2,
@@ -42,11 +40,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
-#ifdef CONFIG_AS_AVX512
&raid6_avx512x4,
&raid6_avx512x2,
&raid6_avx512x1,
-#endif
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
@@ -96,9 +92,7 @@ EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
-#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
-#endif
&raid6_recov_avx2,
&raid6_recov_ssse3,
#endif
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index 9c3e822e1adf..009bd0adeebf 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -17,8 +17,6 @@
*
*/
-#ifdef CONFIG_AS_AVX512
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -560,5 +558,3 @@ const struct raid6_calls raid6_avx512x4 = {
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif
-
-#endif /* CONFIG_AS_AVX512 */
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
index fd9e15bf3f30..310c715db313 100644
--- a/lib/raid6/recov_avx512.c
+++ b/lib/raid6/recov_avx512.c
@@ -6,8 +6,6 @@
* Author: Megha Dey <megha.dey@linux.intel.com>
*/
-#ifdef CONFIG_AS_AVX512
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -377,7 +375,3 @@ const struct raid6_recov_calls raid6_recov_avx512 = {
#endif
.priority = 3,
};
-
-#else
-#warning "your version of binutils lacks AVX512 support"
-#endif
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 2abe0076a636..8f2dd2210ba8 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -54,9 +54,6 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += -DCONFIG_X86
- CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >/dev/null 2>&1 && \
- rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 989c2d615f92..5114eda6309c 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -297,9 +297,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N S --> N sl
* / \ \
- * sl sr S
+ * sl Sr S
* \
- * sr
+ * Sr
*
* Note: p might be red, and then both
* p and sl are red after rotation(which
@@ -312,9 +312,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N sl --> P S
* \ / \
- * S N sr
+ * S N Sr
* \
- * sr
+ * Sr
*/
tmp1 = tmp2->rb_right;
WRITE_ONCE(sibling->rb_left, tmp1);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b58d5ef1a34b..7582dfab7fe3 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -14,29 +14,6 @@
#include <linux/folio_queue.h>
/**
- * sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * Usually the next entry will be @sg@ + 1, but if this sg element is part
- * of a chained scatterlist, it could jump to the start of a new
- * scatterlist array.
- *
- **/
-struct scatterlist *sg_next(struct scatterlist *sg)
-{
- if (sg_is_last(sg))
- return NULL;
-
- sg++;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
-
- return sg;
-}
-EXPORT_SYMBOL(sg_next);
-
-/**
* sg_nents - return total count of entries in scatterlist
* @sg: The scatterlist
*
diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile
index 1c3f82ad8bb2..399cae880e1d 100644
--- a/lib/test_fortify/Makefile
+++ b/lib/test_fortify/Makefile
@@ -18,10 +18,7 @@ quiet_cmd_gen_fortify_log = CAT $@
$(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE
$(call if_changed,gen_fortify_log)
-# GCC<=7 does not always produce *.d files.
-# Run the tests only for GCC>=8 or Clang.
-always-$(call gcc-min-version, 80000) += test_fortify.log
-always-$(CONFIG_CC_IS_CLANG) += test_fortify.log
+always-y += test_fortify.log
# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
# Pass CFLAGS_KASAN to avoid warnings.
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 064ed0fce75a..f0dd092860ea 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -28,14 +28,20 @@
#define TEST_START_NUM_THREADS 50
#define TEST_START_DRIVER "test_module"
-#define TEST_START_TEST_FS "xfs"
#define TEST_START_TEST_CASE TEST_KMOD_DRIVER
-
static bool force_init_test = false;
-module_param(force_init_test, bool_enable_only, 0644);
+module_param(force_init_test, bool_enable_only, 0444);
MODULE_PARM_DESC(force_init_test,
"Force kicking a test immediately after driver loads");
+static char *start_driver;
+module_param(start_driver, charp, 0444);
+MODULE_PARM_DESC(start_driver,
+ "Module/driver to use for the testing after driver loads");
+static char *start_test_fs;
+module_param(start_test_fs, charp, 0444);
+MODULE_PARM_DESC(start_test_fs,
+ "File system to use for the testing after driver loads");
/*
* For device allocation / registration
@@ -508,6 +514,11 @@ static int __trigger_config_run(struct kmod_test_device *test_dev)
case TEST_KMOD_DRIVER:
return run_test_driver(test_dev);
case TEST_KMOD_FS_TYPE:
+ if (!config->test_fs) {
+ dev_warn(test_dev->dev,
+ "No fs type specified, can't run the test\n");
+ return -EINVAL;
+ }
return run_test_fs_type(test_dev);
default:
dev_warn(test_dev->dev,
@@ -721,26 +732,20 @@ static ssize_t config_test_fs_show(struct device *dev,
static DEVICE_ATTR_RW(config_test_fs);
static int trigger_config_run_type(struct kmod_test_device *test_dev,
- enum kmod_test_case test_case,
- const char *test_str)
+ enum kmod_test_case test_case)
{
- int copied = 0;
struct test_config *config = &test_dev->config;
mutex_lock(&test_dev->config_mutex);
switch (test_case) {
case TEST_KMOD_DRIVER:
- kfree_const(config->test_driver);
- config->test_driver = NULL;
- copied = config_copy_test_driver_name(config, test_str,
- strlen(test_str));
break;
case TEST_KMOD_FS_TYPE:
- kfree_const(config->test_fs);
- config->test_fs = NULL;
- copied = config_copy_test_fs(config, test_str,
- strlen(test_str));
+ if (!config->test_fs) {
+ mutex_unlock(&test_dev->config_mutex);
+ return 0;
+ }
break;
default:
mutex_unlock(&test_dev->config_mutex);
@@ -751,11 +756,6 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
mutex_unlock(&test_dev->config_mutex);
- if (copied <= 0 || copied != strlen(test_str)) {
- test_dev->test_is_oom = true;
- return -ENOMEM;
- }
-
test_dev->test_is_oom = false;
return trigger_config_run(test_dev);
@@ -800,19 +800,24 @@ static unsigned int kmod_init_test_thread_limit(void)
static int __kmod_config_init(struct kmod_test_device *test_dev)
{
struct test_config *config = &test_dev->config;
+ const char *test_start_driver = start_driver ? start_driver :
+ TEST_START_DRIVER;
int ret = -ENOMEM, copied;
__kmod_config_free(config);
- copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
- strlen(TEST_START_DRIVER));
- if (copied != strlen(TEST_START_DRIVER))
+ copied = config_copy_test_driver_name(config, test_start_driver,
+ strlen(test_start_driver));
+ if (copied != strlen(test_start_driver))
goto err_out;
- copied = config_copy_test_fs(config, TEST_START_TEST_FS,
- strlen(TEST_START_TEST_FS));
- if (copied != strlen(TEST_START_TEST_FS))
- goto err_out;
+
+ if (start_test_fs) {
+ copied = config_copy_test_fs(config, start_test_fs,
+ strlen(start_test_fs));
+ if (copied != strlen(start_test_fs))
+ goto err_out;
+ }
config->num_threads = kmod_init_test_thread_limit();
config->test_result = 0;
@@ -1178,12 +1183,11 @@ static int __init test_kmod_init(void)
* lowering the init level for more fun.
*/
if (force_init_test) {
- ret = trigger_config_run_type(test_dev,
- TEST_KMOD_DRIVER, "tun");
+ ret = trigger_config_run_type(test_dev, TEST_KMOD_DRIVER);
if (WARN_ON(ret))
return ret;
- ret = trigger_config_run_type(test_dev,
- TEST_KMOD_FS_TYPE, "btrfs");
+
+ ret = trigger_config_run_type(test_dev, TEST_KMOD_FS_TYPE);
if (WARN_ON(ret))
return ret;
}
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index f585949ff696..1b0b59549aaf 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -13,9 +13,9 @@
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/delay.h>
-#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
+#include <linux/srcu.h>
#include <linux/slab.h>
#define __param(type, name, init, msg) \
@@ -58,10 +58,9 @@ __param(int, run_test_mask, INT_MAX,
);
/*
- * Read write semaphore for synchronization of setup
- * phase that is done in main thread and workers.
+ * This is for synchronization of setup phase.
*/
-static DECLARE_RWSEM(prepare_for_test_rwsem);
+DEFINE_STATIC_SRCU(prepare_for_test_srcu);
/*
* Completion tracking for worker threads.
@@ -458,7 +457,7 @@ static int test_func(void *private)
/*
* Block until initialization is done.
*/
- down_read(&prepare_for_test_rwsem);
+ synchronize_srcu(&prepare_for_test_srcu);
t->start = get_cycles();
for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
@@ -487,8 +486,6 @@ static int test_func(void *private)
t->data[index].time = delta;
}
t->stop = get_cycles();
-
- up_read(&prepare_for_test_rwsem);
test_report_one_done();
/*
@@ -526,7 +523,7 @@ init_test_configuration(void)
static void do_concurrent_test(void)
{
- int i, ret;
+ int i, ret, idx;
/*
* Set some basic configurations plus sanity check.
@@ -538,7 +535,7 @@ static void do_concurrent_test(void)
/*
* Put on hold all workers.
*/
- down_write(&prepare_for_test_rwsem);
+ idx = srcu_read_lock(&prepare_for_test_srcu);
for (i = 0; i < nr_threads; i++) {
struct test_driver *t = &tdriver[i];
@@ -555,7 +552,7 @@ static void do_concurrent_test(void)
/*
* Now let the workers do their job.
*/
- up_write(&prepare_for_test_rwsem);
+ srcu_read_unlock(&prepare_for_test_srcu, idx);
/*
* Sleep quiet until all workers are done with 1 second
@@ -594,10 +591,11 @@ static void do_concurrent_test(void)
kvfree(tdriver);
}
-static int vmalloc_test_init(void)
+static int __init vmalloc_test_init(void)
{
do_concurrent_test();
- return -EAGAIN; /* Fail will directly unload the module */
+ /* Fail will directly unload the module */
+ return IS_BUILTIN(CONFIG_TEST_VMALLOC) ? 0:-EAGAIN;
}
module_init(vmalloc_test_init)
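
The rwsem is replaced with SRCU used as a one-shot setup gate: the launcher enters an SRCU read-side critical section before creating the workers, and each worker calls synchronize_srcu(), which cannot return until that read-side section is exited. A stripped-down sketch of the pattern (simplified, not the module's actual code):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(setup_srcu);

static int worker_fn(void *arg)
{
	/* Blocks until the launcher leaves its read-side section. */
	synchronize_srcu(&setup_srcu);
	/* ... run the measurements ... */
	return 0;
}

static void launch_workers(void)
{
	int idx;

	idx = srcu_read_lock(&setup_srcu);	/* hold all workers back */
	/* ... create worker threads and fill in their configuration ... */
	srcu_read_unlock(&setup_srcu, idx);	/* release them together */
}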
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 080a39d22e73..5ca0aefee9aa 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1040,6 +1040,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
unsigned int i, id;
unsigned long index;
void *entry;
+ int ret;
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
&next, GFP_KERNEL) != 0);
@@ -1059,7 +1060,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
else
entry = xa_mk_index(i - 0x3fff);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
- &next, GFP_KERNEL) != (id == 1));
+ &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_mk_index(id) != entry);
}
@@ -1072,7 +1073,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
xa_limit_32b, &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, id != UINT_MAX);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
- xa_limit_32b, &next, GFP_KERNEL) != 1);
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
XA_BUG_ON(xa, id != base);
XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
xa_limit_32b, &next, GFP_KERNEL) != 0);
@@ -1080,7 +1081,19 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
xa_for_each(xa, index, entry)
xa_erase_index(xa, index);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ /* check wrap-around return of __xa_alloc_cyclic() */
+ next = UINT_MAX;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ xa_lock(xa);
+ ret = __xa_alloc_cyclic(xa, &id, xa_mk_index(base), xa_limit_32b,
+ &next, GFP_KERNEL);
+ xa_unlock(xa);
+ XA_BUG_ON(xa, ret != 1);
+ xa_for_each(xa, index, entry)
+ xa_erase_index(xa, index);
XA_BUG_ON(xa, !xa_empty(xa));
}
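
The added check exercises the documented wrap-around behaviour: xa_alloc_cyclic() and __xa_alloc_cyclic() return 0 for an ordinary allocation and 1 (not an error) once the cyclic counter wraps back to the lower limit. A caller-side sketch (add_object() and its arguments are hypothetical):

static int add_object(struct xarray *xa, void *obj, u32 *id, u32 *next)
{
	int err;

	err = xa_alloc_cyclic(xa, id, obj, xa_limit_32b, next, GFP_KERNEL);
	if (err < 0)
		return err;		/* e.g. -ENOMEM or -EBUSY */
	if (err == 1)
		pr_debug("ID space wrapped, low IDs are being reused\n");
	return 0;
}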
diff --git a/lib/tests/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
index 63aa78e6f5c1..ff2784769772 100644
--- a/lib/tests/stackinit_kunit.c
+++ b/lib/tests/stackinit_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test cases for compiler-based stack variable zeroing via
- * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * -ftrivial-auto-var-init={zero,pattern}.
* For example, see:
* "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
* ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
@@ -376,14 +376,6 @@ union test_small_end {
# define USER_PASS XFAIL
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
-#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
-# define USER_PASS WANT_SUCCESS
-# define BYREF_PASS XFAIL
-# define STRONG_PASS XFAIL
-#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
-# define USER_PASS WANT_SUCCESS
-# define BYREF_PASS WANT_SUCCESS
-# define STRONG_PASS XFAIL
#else
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
diff --git a/lib/xarray.c b/lib/xarray.c
index 9644b18af18d..76dde3a1cacf 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1742,20 +1742,23 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp);
/**
- * __xa_cmpxchg() - Store this entry in the XArray.
+ * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
- * @entry: New entry.
+ * @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* You must already be holding the xa_lock when calling this function.
* It will drop the lock if needed to allocate memory, and then reacquire
* it afterwards.
*
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
- * Return: The old entry at this index or xa_err() if an error happened.
+ * Return: The old value at this index or xa_err() if an error happened.
*/
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)