author     Linus Torvalds <torvalds@linux-foundation.org>   2025-04-01 10:06:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-04-01 10:06:52 -0700
commit     d6b02199cde4b9cb99b311eeab1cdbe23165082c (patch)
tree       408f91cb2a6241098bd42f04bd15b2848953ea28 /lib
parent     eb0ece16027f8223d5dc9aaf90124f70577bd22a (diff)
parent     434333dd3f66f9d1ad387dabd2a565182a823f31 (diff)
Merge tag 'mm-nonmm-stable-2025-03-30-18-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:
- The series "powerpc/crash: use generic crashkernel reservation" from
Sourabh Jain changes powerpc's kexec code to use more of the generic
layers.
- The series "get_maintainer: report subsystem status separately" from
Vlastimil Babka makes some long-requested improvements to the
get_maintainer output.
- The series "ucount: Simplify refcounting with rcuref_t" from
Sebastian Siewior cleans up and optimizing the refcounting in the
ucount code.
- The series "reboot: support runtime configuration of emergency
hw_protection action" from Ahmad Fatoum improves the ability for a
driver to perform an emergency system shutdown or reboot.
- The series "Converge on using secs_to_jiffies() part two" from Easwar
Hariharan performs further migrations from msecs_to_jiffies() to
secs_to_jiffies().
- The series "lib/interval_tree: add some test cases and cleanup" from
Wei Yang permits more userspace testing of kernel library code, adds
some more tests and performs some cleanups.
- The series "hung_task: Dump the blocking task stacktrace" from Masami
Hiramatsu arranges for the hung_task detector to dump the stack of
the blocking task and not just that of the blocked task.
- The series "resource: Split and use DEFINE_RES*() macros" from Andy
Shevchenko provides some cleanups to the resource definition macros.
- Plus the usual shower of singleton patches - please see the
individual changelogs for details.
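As an illustration of the secs_to_jiffies() conversions: the call site
below is hypothetical, but both helpers are the real ones from
<linux/jiffies.h>:

	/* before: a 10-second timeout expressed in milliseconds */
	schedule_timeout_interruptible(msecs_to_jiffies(10000));

	/* after: the same timeout stated directly in seconds */
	schedule_timeout_interruptible(secs_to_jiffies(10));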
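As an illustration of the DEFINE_RES*() cleanups, a minimal sketch of
the pattern being replaced, with a made-up MMIO range;
DEFINE_RES_MEM_NAMED() is the existing helper from <linux/ioport.h>:

	/* open-coded variant */
	static struct resource res = {
		.start = 0xfe000000,
		.end   = 0xfe000000 + 0x1000 - 1,
		.name  = "example-mmio",
		.flags = IORESOURCE_MEM,
	};

	/* equivalent, using the helper */
	static struct resource res =
		DEFINE_RES_MEM_NAMED(0xfe000000, 0x1000, "example-mmio");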
* tag 'mm-nonmm-stable-2025-03-30-18-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (77 commits)
mailmap: consolidate email addresses of Alexander Sverdlin
fs/procfs: fix the comment above proc_pid_wchan()
relay: use kasprintf() instead of fixed buffer formatting
resource: replace open coded variant of DEFINE_RES()
resource: replace open coded variants of DEFINE_RES_*_NAMED()
resource: replace open coded variant of DEFINE_RES_NAMED_DESC()
resource: split DEFINE_RES_NAMED_DESC() out of DEFINE_RES_NAMED()
samples: add hung_task detector mutex blocking sample
hung_task: show the blocker task if the task is hung on mutex
kexec_core: accept unaccepted kexec segments' destination addresses
watchdog/perf: optimize bytes copied and remove manual NUL-termination
lib/interval_tree: fix the comment of interval_tree_span_iter_next_gap()
lib/interval_tree: skip the check before go to the right subtree
lib/interval_tree: add test case for span iteration
lib/interval_tree: add test case for interval_tree_iter_xxx() helpers
lib/rbtree: add random seed
lib/rbtree: split tests
lib/rbtree: enable userland test suite for rbtree related data structure
checkpatch: describe --min-conf-desc-length
scripts/gdb/symbols: determine KASLR offset on s390
...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug            11
-rw-r--r--  lib/interval_tree.c          12
-rw-r--r--  lib/interval_tree_test.c    237
-rw-r--r--  lib/min_heap.c                4
-rw-r--r--  lib/plist.c                  12
-rw-r--r--  lib/rbtree_test.c            30
-rw-r--r--  lib/zlib_deflate/deflate.c    6
7 files changed, 282 insertions, 30 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0ffd5526bd46..2e1345c38f2f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1280,6 +1280,17 @@ config BOOTPARAM_HUNG_TASK_PANIC
 
 	  Say N if unsure.
 
+config DETECT_HUNG_TASK_BLOCKER
+	bool "Dump Hung Tasks Blocker"
+	depends on DETECT_HUNG_TASK
+	depends on !PREEMPT_RT
+	default y
+	help
+	  Say Y here to show the blocker task's stacktrace who acquires
+	  the mutex lock which "hung tasks" are waiting.
+	  This will add overhead a bit but shows suspicious tasks and
+	  call trace if it comes from waiting a mutex.
+
 config WQ_WATCHDOG
 	bool "Detect Workqueue Stalls"
 	depends on DEBUG_KERNEL
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index 3412737ff365..324766e9bf63 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -20,9 +20,15 @@ EXPORT_SYMBOL_GPL(interval_tree_iter_next);
 /*
  * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
  * span of nodes. This makes nodes[0]->last the end of that contiguous used span
- * indexes that started at the original nodes[1]->start. nodes[1] is now the
- * first node starting the next used span. A hole span is between nodes[0]->last
- * and nodes[1]->start. nodes[1] must be !NULL.
+ * of indexes that started at the original nodes[1]->start.
+ *
+ * If there is an interior hole, nodes[1] is now the first node starting the
+ * next used span. A hole span is between nodes[0]->last and nodes[1]->start.
+ *
+ * If there is a tailing hole, nodes[1] is now NULL. A hole span is between
+ * nodes[0]->last and last_index.
+ *
+ * If the contiguous used range span to last_index, nodes[1] is set to NULL.
  */
 static void
 interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 837064b83a6c..5fd62656f42e 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -5,6 +5,8 @@
 #include <linux/prandom.h>
 #include <linux/slab.h>
 #include <asm/timex.h>
+#include <linux/bitmap.h>
+#include <linux/maple_tree.h>
 
 #define __param(type, name, init, msg)		\
 	static type name = init;		\
@@ -19,6 +21,7 @@ __param(int, search_loops, 1000, "Number of iterations searching the tree");
 __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
 
 __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
 
 static struct rb_root_cached root = RB_ROOT_CACHED;
 static struct interval_tree_node *nodes = NULL;
@@ -59,26 +62,13 @@ static void init(void)
 		queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
 }
 
-static int interval_tree_test_init(void)
+static int basic_check(void)
 {
 	int i, j;
-	unsigned long results;
 	cycles_t time1, time2, time;
 
-	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
-			      GFP_KERNEL);
-	if (!nodes)
-		return -ENOMEM;
-
-	queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
-	if (!queries) {
-		kfree(nodes);
-		return -ENOMEM;
-	}
-
 	printk(KERN_ALERT "interval tree insert/remove");
 
-	prandom_seed_state(&rnd, 3141592653589793238ULL);
 	init();
 
 	time1 = get_cycles();
@@ -96,8 +86,19 @@ static int interval_tree_test_init(void)
 	time = div_u64(time, perf_loops);
 	printk(" -> %llu cycles\n", (unsigned long long)time);
 
+	return 0;
+}
+
+static int search_check(void)
+{
+	int i, j;
+	unsigned long results;
+	cycles_t time1, time2, time;
+
 	printk(KERN_ALERT "interval tree search");
 
+	init();
+
 	for (j = 0; j < nnodes; j++)
 		interval_tree_insert(nodes + j, &root);
 
@@ -120,6 +121,214 @@ static int interval_tree_test_init(void)
 	printk(" -> %llu cycles (%lu results)\n",
 	       (unsigned long long)time, results);
 
+	for (j = 0; j < nnodes; j++)
+		interval_tree_remove(nodes + j, &root);
+
+	return 0;
+}
+
+static int intersection_range_check(void)
+{
+	int i, j, k;
+	unsigned long start, last;
+	struct interval_tree_node *node;
+	unsigned long *intxn1;
+	unsigned long *intxn2;
+
+	printk(KERN_ALERT "interval tree iteration\n");
+
+	intxn1 = bitmap_alloc(nnodes, GFP_KERNEL);
+	if (!intxn1) {
+		WARN_ON_ONCE("Failed to allocate intxn1\n");
+		return -ENOMEM;
+	}
+
+	intxn2 = bitmap_alloc(nnodes, GFP_KERNEL);
+	if (!intxn2) {
+		WARN_ON_ONCE("Failed to allocate intxn2\n");
+		bitmap_free(intxn1);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < search_loops; i++) {
+		/* Initialize interval tree for each round */
+		init();
+		for (j = 0; j < nnodes; j++)
+			interval_tree_insert(nodes + j, &root);
+
+		/* Let's try nsearches different ranges */
+		for (k = 0; k < nsearches; k++) {
+			/* Try whole range once */
+			if (!k) {
+				start = 0UL;
+				last = ULONG_MAX;
+			} else {
+				last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+				start = (prandom_u32_state(&rnd) >> 4) % last;
+			}
+
+			/* Walk nodes to mark intersection nodes */
+			bitmap_zero(intxn1, nnodes);
+			for (j = 0; j < nnodes; j++) {
+				node = nodes + j;
+
+				if (start <= node->last && last >= node->start)
+					bitmap_set(intxn1, j, 1);
+			}
+
+			/* Iterate tree to clear intersection nodes */
+			bitmap_zero(intxn2, nnodes);
+			for (node = interval_tree_iter_first(&root, start, last); node;
+			     node = interval_tree_iter_next(node, start, last))
+				bitmap_set(intxn2, node - nodes, 1);
+
+			WARN_ON_ONCE(!bitmap_equal(intxn1, intxn2, nnodes));
+		}
+
+		for (j = 0; j < nnodes; j++)
+			interval_tree_remove(nodes + j, &root);
+	}
+
+	bitmap_free(intxn1);
+	bitmap_free(intxn2);
+	return 0;
+}
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Helper function to get span of current position from maple tree point of
+ * view.
+ */
+static void mas_cur_span(struct ma_state *mas, struct interval_tree_span_iter *state)
+{
+	unsigned long cur_start;
+	unsigned long cur_last;
+	int is_hole;
+
+	if (mas->status == ma_overflow)
+		return;
+
+	/* walk to current position */
+	state->is_hole = mas_walk(mas) ? 0 : 1;
+
+	cur_start = mas->index < state->first_index ?
+			state->first_index : mas->index;
+
+	/* whether we have followers */
+	do {
+
+		cur_last = mas->last > state->last_index ?
+				state->last_index : mas->last;
+
+		is_hole = mas_next_range(mas, state->last_index) ? 0 : 1;
+
+	} while (mas->status != ma_overflow && is_hole == state->is_hole);
+
+	if (state->is_hole) {
+		state->start_hole = cur_start;
+		state->last_hole = cur_last;
+	} else {
+		state->start_used = cur_start;
+		state->last_used = cur_last;
+	}
+
+	/* advance position for next round */
+	if (mas->status != ma_overflow)
+		mas_set(mas, cur_last + 1);
+}
+
+static int span_iteration_check(void)
+{
+	int i, j, k;
+	unsigned long start, last;
+	struct interval_tree_span_iter span, mas_span;
+
+	DEFINE_MTREE(tree);
+
+	MA_STATE(mas, &tree, 0, 0);
+
+	printk(KERN_ALERT "interval tree span iteration\n");
+
+	for (i = 0; i < search_loops; i++) {
+		/* Initialize interval tree for each round */
+		init();
+		for (j = 0; j < nnodes; j++)
+			interval_tree_insert(nodes + j, &root);
+
+		/* Put all the range into maple tree */
+		mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+		mt_set_in_rcu(&tree);
+
+		for (j = 0; j < nnodes; j++)
+			WARN_ON_ONCE(mtree_store_range(&tree, nodes[j].start,
+					nodes[j].last, nodes + j, GFP_KERNEL));
+
+		/* Let's try nsearches different ranges */
+		for (k = 0; k < nsearches; k++) {
+			/* Try whole range once */
+			if (!k) {
+				start = 0UL;
+				last = ULONG_MAX;
+			} else {
+				last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+				start = (prandom_u32_state(&rnd) >> 4) % last;
+			}
+
+			mas_span.first_index = start;
+			mas_span.last_index = last;
+			mas_span.is_hole = -1;
+			mas_set(&mas, start);
+
+			interval_tree_for_each_span(&span, &root, start, last) {
+				mas_cur_span(&mas, &mas_span);
+
+				WARN_ON_ONCE(span.is_hole != mas_span.is_hole);
+
+				if (span.is_hole) {
+					WARN_ON_ONCE(span.start_hole != mas_span.start_hole);
+					WARN_ON_ONCE(span.last_hole != mas_span.last_hole);
+				} else {
+					WARN_ON_ONCE(span.start_used != mas_span.start_used);
+					WARN_ON_ONCE(span.last_used != mas_span.last_used);
+				}
+			}
+
+		}
+
+		WARN_ON_ONCE(mas.status != ma_overflow);
+
+		/* Cleanup maple tree for each round */
+		mtree_destroy(&tree);
+		/* Cleanup interval tree for each round */
+		for (j = 0; j < nnodes; j++)
+			interval_tree_remove(nodes + j, &root);
+	}
+	return 0;
+}
+#else
+static inline int span_iteration_check(void) {return 0; }
+#endif
+
+static int interval_tree_test_init(void)
+{
+	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+			      GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
+	if (!queries) {
+		kfree(nodes);
+		return -ENOMEM;
+	}
+
+	prandom_seed_state(&rnd, seed);
+
+	basic_check();
+	search_check();
+	intersection_range_check();
+	span_iteration_check();
+
 	kfree(queries);
 	kfree(nodes);
diff --git a/lib/min_heap.c b/lib/min_heap.c
index 4485372ff3b1..96f01a4c5fb6 100644
--- a/lib/min_heap.c
+++ b/lib/min_heap.c
@@ -2,7 +2,7 @@
 #include <linux/export.h>
 #include <linux/min_heap.h>
 
-void __min_heap_init(min_heap_char *heap, void *data, int size)
+void __min_heap_init(min_heap_char *heap, void *data, size_t size)
 {
 	__min_heap_init_inline(heap, data, size);
 }
@@ -20,7 +20,7 @@ bool __min_heap_full(min_heap_char *heap)
 }
 EXPORT_SYMBOL(__min_heap_full);
 
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
 			  const struct min_heap_callbacks *func, void *args)
 {
 	__min_heap_sift_down_inline(heap, pos, elem_size, func, args);
diff --git a/lib/plist.c b/lib/plist.c
index c6bce1226874..330febb4bd7d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -171,12 +171,24 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
 
 	plist_del(node, head);
 
+	/*
+	 * After plist_del(), iter is the replacement of the node. If the node
+	 * was on prio_list, take shortcut to find node_next instead of looping.
+	 */
+	if (!list_empty(&iter->prio_list)) {
+		iter = list_entry(iter->prio_list.next, struct plist_node,
+				  prio_list);
+		node_next = &iter->node_list;
+		goto queue;
+	}
+
 	plist_for_each_continue(iter, head) {
 		if (node->prio != iter->prio) {
 			node_next = &iter->node_list;
 			break;
 		}
 	}
+queue:
 	list_add_tail(&node->node_list, node_next);
 
 	plist_check_head(head);
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 8655a76d29a1..690cede46ac2 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -14,6 +14,7 @@
 __param(int, nnodes, 100, "Number of nodes in the rb-tree");
 __param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
 __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
 
 struct test_node {
 	u32 key;
@@ -239,19 +240,14 @@ static void check_augmented(int nr_nodes)
 	}
 }
 
-static int __init rbtree_test_init(void)
+static int basic_check(void)
 {
 	int i, j;
 	cycles_t time1, time2, time;
 	struct rb_node *node;
 
-	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
-	if (!nodes)
-		return -ENOMEM;
-
 	printk(KERN_ALERT "rbtree testing");
 
-	prandom_seed_state(&rnd, 3141592653589793238ULL);
 	init();
 
 	time1 = get_cycles();
@@ -343,6 +339,14 @@ static int __init rbtree_test_init(void)
 		check(0);
 	}
 
+	return 0;
+}
+
+static int augmented_check(void)
+{
+	int i, j;
+	cycles_t time1, time2, time;
+
 	printk(KERN_ALERT "augmented rbtree testing");
 
 	init();
@@ -390,6 +394,20 @@ static int __init rbtree_test_init(void)
 		check_augmented(0);
 	}
 
+	return 0;
+}
+
+static int __init rbtree_test_init(void)
+{
+	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	prandom_seed_state(&rnd, seed);
+
+	basic_check();
+	augmented_check();
+
 	kfree(nodes);
 
 	return -EAGAIN; /* Fail will directly unload the module */
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 3a1d8d34182e..8fb2a3e17c0e 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -151,9 +151,6 @@ static const config configuration_table[10] = {
  * meaning.
  */
 
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
 /* ===========================================================================
  * Update a hash value with the given input byte
  * IN  assertion: all calls to UPDATE_HASH are made with consecutive
@@ -713,8 +710,7 @@ static void check_match(
 )
 {
     /* check that the match is indeed a match */
-    if (memcmp((char *)s->window + match,
-               (char *)s->window + start, length) != EQUAL) {
+    if (memcmp((char *)s->window + match, (char *)s->window + start, length)) {
 	fprintf(stderr, " start %u, match %u, length %d\n",
 		start, match, length);
 	do {
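The span iterator exercised by the new span_iteration_check() above
alternates between used spans and holes over a queried range, as the
updated comment in lib/interval_tree.c describes. A minimal usage
sketch, assuming CONFIG_INTERVAL_TREE_SPAN_ITER, an rb_root_cached
interval tree named root, and hypothetical query bounds first/last:

	struct interval_tree_span_iter span;

	interval_tree_for_each_span(&span, &root, first, last) {
		if (span.is_hole)
			/* no nodes cover [start_hole, last_hole] */
			pr_info("hole [%lu, %lu]\n",
				span.start_hole, span.last_hole);
		else
			/* nodes cover [start_used, last_used] */
			pr_info("used [%lu, %lu]\n",
				span.start_used, span.last_used);
	}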
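With the previously hard-coded seed exposed as a module parameter in
both test modules, a run can now be reproduced deterministically. A
hypothetical invocation (parameter names are those added by the diff;
the default seed remains 3141592653589793238):

	modprobe interval_tree_test seed=42 search_loops=10
	modprobe rbtree_test seed=42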