path: root/kernel/smp.c
author    Paul E. McKenney <paulmck@kernel.org>    2024-08-05 11:44:43 -0700
committer Paul E. McKenney <paulmck@kernel.org>    2024-10-11 09:31:21 -0700
commit    9861f7f66f98a6358c944c17a5d4acd07abcb1a7 (patch)
tree      89823d6cbadcb42ffe6e08408f25007ae2cf9799 /kernel/smp.c
parent    9852d85ec9d492ebef56dc5f229416c925758edc (diff)
locking/csd-lock: Switch from sched_clock() to ktime_get_mono_fast_ns()
Currently, the CONFIG_CSD_LOCK_WAIT_DEBUG code uses sched_clock() to check for excessive CSD-lock wait times. This works, but does not guarantee monotonic timestamps on x86 due to the sched_clock() function's use of the rdtsc instruction, which does not guarantee ordering. This means that, given successive calls to sched_clock(), the second might return an earlier time than the first, that is, time might seem to go backwards. This can (and does!) result in false-positive CSD-lock wait complaints claiming almost 2^64 nanoseconds of delay.

Therefore, switch from sched_clock() to ktime_get_mono_fast_ns(), which does guarantee monotonic timestamps via the rdtsc_ordered() function, which, as the name implies, does guarantee ordered timestamps, at least in the absence of calls from NMI handlers, which are not involved in this code path.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Cc: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Cc: Leonardo Bras <leobras@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
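[Editor's illustration, not part of the patch: a minimal userspace C sketch of the failure mode described above. It shows how one out-of-order pair of timestamps becomes an apparent delay of almost 2^64 ns once the difference is computed in unsigned 64-bit arithmetic, which is how the CSD-lock code computes ts_delta; the numeric values below are hypothetical.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical readings: the second read returns an earlier value
	 * because the underlying unordered rdtsc executed "too early". */
	uint64_t ts1 = 1000000100;	/* first timestamp */
	uint64_t ts2 = 1000000000;	/* second timestamp, 100 ns behind */

	/* Same unsigned subtraction the wait-time check performs. */
	uint64_t ts_delta = ts2 - ts1;

	/* Prints 18446744073709551516, just under 2^64 nanoseconds, which
	 * dwarfs any plausible csd_lock_timeout_ns and so triggers a
	 * false-positive stuck-lock complaint. */
	printf("apparent delay: %" PRIu64 " ns\n", ts_delta);
	return 0;
}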
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index f25e20617b7e..27dc31a146a3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -246,7 +246,7 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, in
 		return true;
 	}
 
-	ts2 = sched_clock();
+	ts2 = ktime_get_mono_fast_ns();
 	/* How long since we last checked for a stuck CSD lock.*/
 	ts_delta = ts2 - *ts1;
 	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
@@ -321,7 +321,7 @@ static void __csd_lock_wait(call_single_data_t *csd)
 	int bug_id = 0;
 	u64 ts0, ts1;
 
-	ts1 = ts0 = ktime_get_mono_fast_ns();
+	ts1 = ts0 = ktime_get_mono_fast_ns();
 	for (;;) {
 		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
 			break;
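[Editor's illustration: a rough userspace analogue of the patched wait loop, with clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get_mono_fast_ns(). still_locked(), WAIT_TIMEOUT_NS, and the other names below are hypothetical stand-ins, not kernel APIs; the point is only that a monotonic clock keeps ts_delta from ever appearing "negative".]

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the CSD-lock timeout. */
#define WAIT_TIMEOUT_NS	(5ULL * 1000 * 1000 * 1000)	/* 5 seconds */

static int spins;

static bool still_locked(void)
{
	/* Trivial stub: pretend the "lock" clears after a few checks. */
	return ++spins < 3;
}

static uint64_t mono_ns(void)
{
	/* CLOCK_MONOTONIC never runs backwards, mirroring the guarantee
	 * that ktime_get_mono_fast_ns() provides in the patched code. */
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void wait_with_complaints(void)
{
	uint64_t ts0, ts1, ts2, ts_delta;

	ts1 = ts0 = mono_ns();			/* start of the wait */
	while (still_locked()) {
		ts2 = mono_ns();		/* periodic recheck */
		ts_delta = ts2 - ts1;		/* cannot wrap "backwards" now */
		if (ts_delta > WAIT_TIMEOUT_NS) {
			printf("waited %" PRIu64 " ns so far\n", ts2 - ts0);
			ts1 = ts2;		/* rate-limit further reports */
		}
	}
}

int main(void)
{
	wait_with_complaints();
	return 0;
}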