author	Li RongQing <lirongqing@baidu.com>	2025-11-26 10:51:47 +0800
committer	Leon Romanovsky <leon@kernel.org>	2025-11-26 03:15:36 -0500
commit	f37e2868792335f2e8bbdcc02ebbb4830453f83c (patch)
tree	5766bccca5963407d78133f2ba3a5500fe679735
parent	01dad9ca37c60d08f71e2ef639875ae895deede6 (diff)
RDMA/core: Reduce cond_resched() frequency in __ib_umem_release
The current implementation calls cond_resched() for every SG entry in
__ib_umem_release(), which adds needless overhead. This patch introduces
RESCHED_LOOP_CNT_THRESHOLD (0x1000) to limit how often cond_resched() is
called: the function now yields the CPU once every 4096 iterations, and
skips the yield on the very first iteration, so that releasing many small
umems avoids the call entirely.

Fixes: d056bc45b62b ("RDMA/core: Prevent soft lockup during large user memory region cleanup")
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Link: https://patch.msgid.link/20251126025147.2627-1-lirongqing@baidu.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
-rw-r--r--	drivers/infiniband/core/umem.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
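To see the throttling arithmetic in isolation, here is a minimal userspace C sketch of the same pattern (not the kernel code): yield_cpu() is a hypothetical stand-in for the kernel's cond_resched(), and the entry count of 10000 is made up for illustration.

#include <stdio.h>

#define RESCHED_LOOP_CNT_THRESHOLD 0x1000 /* 4096, same value as the patch */

/* hypothetical stand-in for cond_resched(), for illustration only */
static void yield_cpu(int i)
{
	printf("yield at iteration %d\n", i);
}

int main(void)
{
	/*
	 * With 10000 entries the loop yields at i = 4096 and i = 8192.
	 * A small table (fewer than 4096 entries) never yields, because
	 * the "i &&" test also skips iteration 0 -- the many-small-umems
	 * case the commit message describes.
	 */
	for (int i = 0; i < 10000; i++) {
		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			yield_cpu(i);
	}
	return 0;
}

Only iterations whose index is a non-zero multiple of 4096 reach the yield, so in the common case each entry costs a single extra branch rather than a full cond_resched() call.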
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 8fd84aa37289..8137031c2a65 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -45,6 +45,8 @@
 #include "uverbs.h"
 
+#define RESCHED_LOOP_CNT_THRESHOLD 0x1000
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
 	bool make_dirty = umem->writable && dirty;
@@ -58,7 +60,9 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
 		unpin_user_page_range_dirty_lock(sg_page(sg),
 				DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
-		cond_resched();
+
+		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
+			cond_resched();
 	}
 	sg_free_append_table(&umem->sgt_append);
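For reference, the release loop as it reads once both hunks are applied, assembled from the diff above (no code beyond the hunks is shown):

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
		unpin_user_page_range_dirty_lock(sg_page(sg),
				DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

		/* yield at most once every RESCHED_LOOP_CNT_THRESHOLD (4096) entries */
		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			cond_resched();
	}
	sg_free_append_table(&umem->sgt_append);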