From 1614b2b11fab29dd4ff31ebba9d266961f5af69e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 29 Nov 2021 14:28:41 +0000
Subject: arch: Make ARCH_STACKWALK independent of STACKTRACE

Make arch_stack_walk() available for ARCH_STACKWALK architectures
without it being entangled in STACKTRACE.

Link: https://lore.kernel.org/lkml/20211022152104.356586621@infradead.org/
Signed-off-by: Peter Zijlstra (Intel)
[Mark: rebase, drop unnecessary arm change]
Signed-off-by: Mark Rutland
Cc: Albert Ou
Cc: Borislav Petkov
Cc: Christian Borntraeger
Cc: Dave Hansen
Cc: Heiko Carstens
Cc: Ingo Molnar
Cc: Michael Ellerman
Cc: Palmer Dabbelt
Cc: Paul Walmsley
Cc: Thomas Gleixner
Cc: Vasily Gorbik
Link: https://lore.kernel.org/r/20211129142849.3056714-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/stacktrace.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 94f83cd44e50..e6ba6b000564 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -221,8 +221,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 	barrier();
 }
 
-#ifdef CONFIG_STACKTRACE
-
 noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 			      void *cookie, struct task_struct *task,
 			      struct pt_regs *regs)
@@ -241,5 +239,3 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 
 	walk_stackframe(task, &frame, consume_entry, cookie);
 }
-
-#endif
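With ARCH_STACKWALK no longer gated on STACKTRACE, in-kernel users on an
ARCH_STACKWALK architecture can drive the unwinder through arch_stack_walk()
directly. As a minimal sketch of the interface this series consolidates
around (not part of the patches; print_entry() and example_dump_current()
are hypothetical names), a consumer looks like this:

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	/*
	 * Sketch only: illustrates the stack_trace_consume_fn contract.
	 * Returning true continues the walk; false terminates it early.
	 */
	static bool print_entry(void *cookie, unsigned long pc)
	{
		printk("%s %pS\n", (const char *)cookie, (void *)pc);
		return true;
	}

	/* Hypothetical helper: print the current task's call chain. */
	static void example_dump_current(void)
	{
		/*
		 * current task + NULL regs: the first reported entry is
		 * arch_stack_walk()'s caller, i.e. this function.
		 */
		arch_stack_walk(print_entry, (void *)KERN_INFO, current, NULL);
	}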
From 2dad6dc17bd0cefd03b42147c6fc9bbd81f7928a Mon Sep 17 00:00:00 2001
From: "Madhavan T. Venkataraman"
Date: Mon, 29 Nov 2021 14:28:48 +0000
Subject: arm64: Make dump_backtrace() use arch_stack_walk()

To enable RELIABLE_STACKTRACE and LIVEPATCH on arm64, we need to
substantially rework arm64's unwinding code. As part of this, we want
to minimize the set of unwind interfaces we expose, and avoid
open-coding of unwind logic.

Currently, dump_backtrace() walks the stack of the current task or a
blocked task by calling start_backtrace() and iterating unwind steps
using unwind_frame(). This can be written more simply in terms of
arch_stack_walk(), considering three distinct cases:

1) When unwinding a blocked task, start_backtrace() is called with the
   blocked task's saved PC and FP, and the unwind proceeds immediately
   from this point without skipping any entries.

   This is functionally equivalent to calling arch_stack_walk() with
   the blocked task, which will start with the task's saved PC and FP.

   There is no functional change to this case.

2) When unwinding the current task without regs, start_backtrace() is
   called with dump_backtrace() as the PC and __builtin_frame_address(0)
   as the next frame, and the unwind proceeds immediately without
   skipping.

   This is *almost* functionally equivalent to calling arch_stack_walk()
   for the current task, which will start with its caller (i.e. an
   offset into dump_backtrace()) as the PC, and the caller's frame
   record as the next frame.

   The only difference is that dump_backtrace() will be reported with
   an offset (which is strictly more correct than currently).
   Otherwise there is no functional change to this case.

3) When unwinding the current task with regs, start_backtrace() is
   called with dump_backtrace() as the PC and __builtin_frame_address(0)
   as the next frame, and the unwind is performed silently until the
   next frame is the frame pointed to by regs->fp. Reporting starts
   from regs->pc and continues from the frame in regs->fp.

   Historically, this pre-unwind was necessary to correctly record
   return addresses rewritten by the ftrace graph caller, but this is
   no longer necessary as these are now recovered using the FP since
   commit:

     c6d3cd32fd0064af ("arm64: ftrace: use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR")

   This pre-unwind is not necessary to recover return addresses
   rewritten by kretprobes, which historically were not recovered, and
   are now recovered using the FP since commit:

     cd9bc2c9258816dc ("arm64: Recover kretprobe modified return address in stacktrace")

   Thus, this is functionally equivalent to calling arch_stack_walk()
   with the current task and regs, which will start with regs->pc as
   the PC and regs->fp as the next frame, without a pre-unwind.

This patch makes dump_backtrace() use arch_stack_walk(). This
simplifies dump_backtrace() and will permit subsequent changes to the
unwind code.

Aside from the improved reporting when unwinding current without regs,
there should be no functional change as a result of this patch.

Signed-off-by: Madhavan T. Venkataraman
[Mark: elaborate commit message]
Signed-off-by: Mark Rutland
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20211129142849.3056714-9-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/stacktrace.c | 44 ++++++------------------------------------
 1 file changed, 6 insertions(+), 38 deletions(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index e6ba6b000564..9fc771a05306 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -156,24 +156,20 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 }
 NOKPROBE_SYMBOL(walk_stackframe);
 
-static void dump_backtrace_entry(unsigned long where, const char *loglvl)
+static bool dump_backtrace_entry(void *arg, unsigned long where)
 {
+	char *loglvl = arg;
 	printk("%s %pSb\n", loglvl, (void *)where);
+	return true;
 }
 
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 		    const char *loglvl)
 {
-	struct stackframe frame;
-	int skip = 0;
-
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
-	if (regs) {
-		if (user_mode(regs))
-			return;
-		skip = 1;
-	}
+	if (regs && user_mode(regs))
+		return;
 
 	if (!tsk)
 		tsk = current;
@@ -181,36 +177,8 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 	if (!try_get_task_stack(tsk))
 		return;
 
-	if (tsk == current) {
-		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)dump_backtrace);
-	} else {
-		/*
-		 * task blocked in __switch_to
-		 */
-		start_backtrace(&frame,
-				thread_saved_fp(tsk),
-				thread_saved_pc(tsk));
-	}
-
 	printk("%sCall trace:\n", loglvl);
-	do {
-		/* skip until specified stack frame */
-		if (!skip) {
-			dump_backtrace_entry(frame.pc, loglvl);
-		} else if (frame.fp == regs->regs[29]) {
-			skip = 0;
-			/*
-			 * Mostly, this is the case where this function is
-			 * called in panic/abort. As exception handler's
-			 * stack frame does not contain the corresponding pc
-			 * at which an exception has taken place, use regs->pc
-			 * instead.
-			 */
-			dump_backtrace_entry(regs->pc, loglvl);
-		}
-	} while (!unwind_frame(tsk, &frame));
+	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
 
 	put_task_stack(tsk);
 }
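The conversion above relies on arch_stack_walk()'s cookie argument to
thread state through the walk. The same pattern covers other consumers,
much as the generic stack_trace_save_tsk() does. As a hedged sketch
(struct example_trace, save_entry() and example_save_trace() are
illustrative names, not kernel API), saving entries into a
caller-supplied buffer looks like:

	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	/* State threaded through the walk via the cookie argument. */
	struct example_trace {
		unsigned long	*entries;
		unsigned int	len;
		unsigned int	max;
	};

	/* Record each return address; stop once the buffer is full. */
	static bool save_entry(void *cookie, unsigned long pc)
	{
		struct example_trace *t = cookie;

		t->entries[t->len++] = pc;
		return t->len < t->max;
	}

	/* Hypothetical helper: capture up to @max entries for @tsk. */
	static unsigned int example_save_trace(struct task_struct *tsk,
					       unsigned long *store,
					       unsigned int max)
	{
		struct example_trace trace = {
			.entries	= store,
			.max		= max,
		};

		arch_stack_walk(save_entry, &trace, tsk, NULL);
		return trace.len;
	}

Returning bool from the callback gives early termination for free,
which is what lets dump_backtrace() drop its open-coded do/while loop.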
From d2d1d2645cfd7230fd958fddbc5e7525c34f1374 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 14:28:49 +0000
Subject: arm64: Make some stacktrace functions private

Now that open-coded stack unwinds have been converted to
arch_stack_walk(), we no longer need to expose any of unwind_frame(),
walk_stackframe(), or start_backtrace() outside of stacktrace.c.

Make those functions private to stacktrace.c, removing their prototypes
from <asm/stacktrace.h> and marking them static.

Signed-off-by: Mark Rutland
Reviewed-by: Mark Brown
Cc: Madhavan T. Venkataraman
Link: https://lore.kernel.org/r/20211129142849.3056714-10-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/stacktrace.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 9fc771a05306..0fb58fed54cb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -33,8 +33,8 @@
  */
 
-void start_backtrace(struct stackframe *frame, unsigned long fp,
-		     unsigned long pc)
+static void start_backtrace(struct stackframe *frame, unsigned long fp,
+			    unsigned long pc)
 {
 	frame->fp = fp;
 	frame->pc = pc;
 
@@ -63,7 +63,8 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+static int notrace unwind_frame(struct task_struct *tsk,
+				struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
 	struct stack_info info;
@@ -141,8 +142,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 }
 NOKPROBE_SYMBOL(unwind_frame);
 
-void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
-			     bool (*fn)(void *, unsigned long), void *data)
+static void notrace walk_stackframe(struct task_struct *tsk,
+				    struct stackframe *frame,
+				    bool (*fn)(void *, unsigned long), void *data)
 {
 	while (1) {
 		int ret;
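With unwind_frame(), walk_stackframe() and start_backtrace() now
private, any behaviour the old open-coded walks implemented, such as
dump_backtrace()'s former skip logic, must be expressed inside the
arch_stack_walk() callback. A sketch under that assumption (struct
skip_walk, skip_then_print() and example_dump_skipping() are
hypothetical, not kernel API):

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	/* Cookie for a walk that silently discards the innermost frames. */
	struct skip_walk {
		unsigned int	skip;		/* frames still to discard */
		const char	*loglvl;	/* printk level prefix */
	};

	static bool skip_then_print(void *cookie, unsigned long pc)
	{
		struct skip_walk *sw = cookie;

		if (sw->skip) {
			sw->skip--;	/* old 'skip' logic, now in the callback */
			return true;
		}
		printk("%s %pS\n", sw->loglvl, (void *)pc);
		return true;
	}

	/* Hypothetical helper: dump current's stack, hiding @skip inner frames. */
	static void example_dump_skipping(unsigned int skip)
	{
		struct skip_walk sw = {
			.skip	= skip,
			.loglvl	= KERN_INFO,
		};

		arch_stack_walk(skip_then_print, &sw, current, NULL);
	}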