path: root/lib
author     Linus Torvalds <torvalds@linux-foundation.org>  2025-03-24 16:15:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-03-24 16:15:47 -0700
commit     05b00ffd7a0bf31f45b63242f30b3a8a0008fa78 (patch)
tree       4b74c62ce9f6a8d840f47a1e8123aca8c27416b9 /lib
parent     95c61e1a9c924481c0828fbd7b9e0432741b7472 (diff)
parent     dea2d9221e83ea02b45a60ab88284cd3bb4bb2a4 (diff)
Merge tag 'slab-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - Move the TINY_RCU kvfree_rcu() implementation from RCU to SLAB
   subsystem and cleanup its integration (Vlastimil Babka)

   Following the move of the TREE_RCU batching kvfree_rcu()
   implementation in 6.14, move also the simpler TINY_RCU variant.
   Refactor the #ifdef guards so that the simple implementation is also
   used with SLUB_TINY.

   Remove the need for RCU to recognize fake callback function pointers
   (__is_kvfree_rcu_offset()) when handling call_rcu() by implementing
   a callback that calculates the object's address from the embedded
   rcu_head address without knowing its offset.

 - Improve kmalloc cache randomization in kvmalloc (GONG Ruiqi)

   Due to an extra layer of function call, all kvmalloc() allocations
   used the same set of random caches. Thanks to moving the kvmalloc()
   implementation to slub.c, this is improved and randomization now
   works for kvmalloc.

 - Various improvements to debugging, testing and other cleanups
   (Hyesoo Yu, Lilith Gkini, Uladzislau Rezki, Matthew Wilcox, Kevin
   Brodsky, Ye Bin)

* tag 'slab-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slub: Handle freelist cycle in on_freelist()
  mm/slab: call kmalloc_noprof() unconditionally in kmalloc_array_noprof()
  slab: Mark large folios for debugging purposes
  kunit, slub: Add test_kfree_rcu_wq_destroy use case
  mm, slab: cleanup slab_bug() parameters
  mm: slub: call WARN() when detecting a slab corruption
  mm: slub: Print the broken data before restoring them
  slab: Achieve better kmalloc caches randomization in kvmalloc
  slab: Adjust placement of __kvmalloc_node_noprof
  mm/slab: simplify SLAB_* flag handling
  slab: don't batch kvfree_rcu() with SLUB_TINY
  rcu, slab: use a regular callback function for kvfree_rcu
  rcu: remove trace_rcu_kvfree_callback
  slab, rcu: move TINY_RCU variant of kvfree_rcu() to SLAB
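The mechanism in the first item is worth unpacking: instead of encoding the rcu_head offset into a fake callback function pointer, kvfree_rcu() now registers a regular callback that recovers the object's start address from the slab metadata backing the rcu_head pointer. Below is a rough, userspace-compilable sketch of just the address arithmetic, not kernel code; fake_slab, rcu_head_like and nearest_obj_like are hypothetical stand-ins, assuming fixed-stride objects packed from a slab base address.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for slab metadata: all the lookup needs is the
 * slab's base address and the fixed stride between its objects. */
struct fake_slab {
	char *base;       /* start of the first object in the slab */
	size_t obj_size;  /* stride between consecutive objects */
};

struct rcu_head_like {
	void *next;
	void (*func)(void *);
};

/* The rcu_head is embedded at an offset the callback never needs to know. */
struct victim {
	int payload;
	struct rcu_head_like rcu;
};

/* Given any interior pointer (here, the embedded rcu_head), recover the
 * object's start by rounding the offset down to the object stride. */
static void *nearest_obj_like(const struct fake_slab *s, const void *interior)
{
	size_t off = (const char *)interior - s->base;

	return s->base + (off / s->obj_size) * s->obj_size;
}

int main(void)
{
	struct victim objs[4];
	struct fake_slab slab = { (char *)objs, sizeof(objs[0]) };

	void *obj = nearest_obj_like(&slab, &objs[2].rcu);

	printf("recovered %p, expected %p\n", obj, (void *)&objs[2]);
	return 0;
}

In the kernel, the stride comes from the object's kmem_cache, found via the slab that backs the rcu_head address, which is why the callback works for any cache without a per-callsite offset.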
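The second item comes down to call-site hashing: with CONFIG_RANDOM_KMALLOC_CACHES, the allocator picks one of several kmalloc cache copies by hashing the caller's return address together with a per-boot seed. While kvmalloc() sat behind an extra function call, the hashed address was always kvmalloc's own internal call site, so every kvmalloc() user collapsed into the same cache copy. A rough userspace model of that failure mode follows; pick_cache and wrapped_alloc are hypothetical, and the per-boot seed mixing is omitted.

#include <stdint.h>
#include <stdio.h>

#define NR_RANDOM_CACHES 8

/* Hypothetical model of caller-keyed cache selection: hash the call
 * site's return address into one of several cache copies. */
static __attribute__((noinline)) unsigned int pick_cache(void)
{
	uintptr_t site = (uintptr_t)__builtin_return_address(0);

	return (unsigned int)((site >> 4) % NR_RANDOM_CACHES);
}

/* One extra call layer: everyone going through the wrapper shares the
 * wrapper's single call site into pick_cache(). */
static __attribute__((noinline)) unsigned int wrapped_alloc(void)
{
	return pick_cache();
}

int main(void)
{
	/* Two distinct call sites, so likely two different caches... */
	printf("caller A: cache %u\n", pick_cache());
	printf("caller B: cache %u\n", pick_cache());

	/* ...but through the wrapper both land in the same cache. */
	printf("via wrapper, A: cache %u\n", wrapped_alloc());
	printf("via wrapper, B: cache %u\n", wrapped_alloc());
	return 0;
}

Moving the kvmalloc() implementation into slub.c removes the intermediate frame, so the hashed address is the real caller's again and the randomization spreads kvmalloc users across caches as intended.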
Diffstat (limited to 'lib')
-rw-r--r--  lib/tests/slub_kunit.c | 59
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index f11691315c2f..d47c472b0520 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -181,6 +182,63 @@ static void test_kfree_rcu(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

+struct cache_destroy_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+};
+
+static void cache_destroy_workfn(struct work_struct *w)
+{
+ struct cache_destroy_work *cdw;
+
+ cdw = container_of(w, struct cache_destroy_work, work);
+ kmem_cache_destroy(cdw->s);
+}
+
+#define KMEM_CACHE_DESTROY_NR 10
+
+static void test_kfree_rcu_wq_destroy(struct kunit *test)
+{
+ struct test_kfree_rcu_struct *p;
+ struct cache_destroy_work cdw;
+ struct workqueue_struct *wq;
+ struct kmem_cache *s;
+ unsigned int delay;
+ int i;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
+ wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ if (!wq)
+ kunit_skip(test, "failed to alloc wq");
+
+ for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
+ s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+
+ if (!s)
+ kunit_skip(test, "failed to create cache");
+
+ delay = get_random_u8();
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+ kfree_rcu(p, rcu);
+
+ cdw.s = s;
+
+ msleep(delay);
+ queue_work(wq, &cdw.work);
+ flush_work(&cdw.work);
+ }
+
+ destroy_workqueue(wq);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
static void test_leak_destroy(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
@@ -254,6 +312,7 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
{}