author		Brandon Kammerdiener <brandon.kammerdiener@intel.com>	2025-04-24 11:32:51 -0400
committer	Alexei Starovoitov <ast@kernel.org>	2025-04-25 08:36:59 -0700
commit		75673fda0c557ae26078177dd14d4857afbf128d
tree		7c1a500bbd7431dbd69b6c7d050bc818f967d8ec /kernel
parent		f2858f308131a09e33afb766cd70119b5b900569
bpf: fix possible endless loop in BPF map iteration
The _safe variant used here gets the next element before running the callback, avoiding the endless loop condition.

Signed-off-by: Brandon Kammerdiener <brandon.kammerdiener@intel.com>
Link: https://lore.kernel.org/r/20250424153246.141677-2-brandon.kammerdiener@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
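The endless loop can arise because hlist_nulls_for_each_entry_rcu() advances by reading the current element's next pointer only after the loop body has run: if the callback deletes the element it is visiting and that element is immediately reused and relinked into the same bucket, the walk can chase recycled next pointers forever. As a rough user-space analogue of the two iteration patterns (plain C, all names invented for illustration; this sketches the post-body advance vs. the pre-body snapshot, not the kernel macros themselves):

/* loopdemo.c: cc -Wall -o loopdemo loopdemo.c && ./loopdemo */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

static struct node *head;

static void push(int key)
{
	struct node *n = malloc(sizeof(*n));

	n->key = key;
	n->next = head;
	head = n;
}

/* Callback analogue: delete the visited element, then relink it at the
 * head, standing in for a deleted element being reused and reinserted
 * into the same bucket. */
static void delete_and_readd(struct node *n)
{
	struct node **pp;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			break;
		}
	}
	n->next = head;
	head = n;
}

int main(void)
{
	struct node *cur, *next;
	int visits;

	push(0);
	push(1);
	push(2);

	/* _rcu-style walk: reads cur->next only AFTER the body runs */
	visits = 0;
	for (cur = head; cur && visits < 20; cur = cur->next) {
		visits++;
		delete_and_readd(cur);
	}
	printf("post-body advance: %d visits (capped; never terminates)\n",
	       visits);

	/* _safe-style walk: snapshots cur->next BEFORE the body runs */
	visits = 0;
	for (cur = head; cur; cur = next) {
		next = cur->next;
		visits++;
		delete_and_readd(cur);
	}
	printf("pre-body snapshot: %d visits\n", visits);
	return 0;
}

The first walk ping-pongs between the two most recently reinserted nodes and only stops at the visit cap; the snapshot walk visits each element once and terminates, which is the behavior the _safe variant restores.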
Diffstat (limited to 'kernel')
 kernel/bpf/hashtab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 5a5adc66b8e2..92b606d60020 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2189,7 +2189,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 		b = &htab->buckets[i];
 		rcu_read_lock();
 		head = &b->head;
-		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+		hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) {
 			key = elem->key;
 			if (is_percpu) {
 				/* current cpu value for percpu map */
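
The patched function is what the bpf_for_each_map_elem() helper reaches for hash maps, so the pattern this change makes safe looks roughly like the following hypothetical BPF program (map, callback, and section names invented for illustration; the callback deletes the element it is currently visiting):

/* purge_demo.bpf.c: iterate a hash map, deleting each visited element */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} demo_map SEC(".maps");

static long drop_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	/* Deleting the element under iteration is exactly the case the
	 * _safe iterator protects against. */
	bpf_map_delete_elem(map, key);
	return 0;	/* 0 = continue iterating, 1 = stop */
}

SEC("tc")
int purge(struct __sk_buff *skb)
{
	bpf_for_each_map_elem(&demo_map, drop_elem, NULL, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

With the _rcu iterator, such a deletion could free the element to the allocator, where it might be reused and relinked before the iterator read its next pointer; the _safe variant reads the next pointer before invoking the callback, so the walk always makes forward progress.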