| author | Menglong Dong <menglong8.dong@gmail.com> | 2025-11-18 20:36:30 +0800 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2025-11-24 09:46:37 -0800 |
| commit | 0c3772a8db1f697c22dd6caef1fe938fbb0e0a5b | |
| tree | bf2436f0211c8ca443112a9c00facd389d6a6ec6 | |
| parent | 25e4e3565d45f567f78089f38822fa64abee5230 | |
x86/ftrace: Implement DYNAMIC_FTRACE_WITH_JMP
Implement DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace(), use
JMP32_INSN_OPCODE instead of CALL_INSN_OPCODE when the address is flagged as
a "jmp" target.
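As an editorial aside, here is a minimal sketch of the tagging convention that ftrace_is_jmp() and ftrace_jmp_get() appear to rely on; their real definitions come from the generic ftrace patch earlier in this series, not from this diff, and the macro name below is purely illustrative. The assumption matches what the assembly below tests with `testb $1, %al` and clears with `andq $0xfffffffffffffffe, %rax`: bit 0 of the trampoline address marks it as a "jmp" target.

```c
#include <linux/types.h>

/*
 * Illustrative sketch only, not the in-tree definitions: the low bit of
 * a direct-call address is assumed to flag "patch a jmp rather than a
 * call", so it must be masked off before the address is encoded.
 */
#define FTRACE_JMP_FLAG	0x1UL	/* hypothetical name for bit 0 */

static inline bool ftrace_is_jmp(unsigned long addr)
{
	return addr & FTRACE_JMP_FLAG;
}

static inline unsigned long ftrace_jmp_get(unsigned long addr)
{
	return addr & ~FTRACE_JMP_FLAG;
}
```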
Meanwhile, adjust the direct-call handling in ftrace_regs_caller. The RSB
stays balanced in "jmp" mode. Take the function "foo" as an example:
```
original_caller:
  call foo -> foo:
              call fentry -> fentry:
                               [do ftrace callbacks]
                               move tramp_addr to stack
                               RET -> tramp_addr
              tramp_addr:
                [..]
                call foo_body -> foo_body:
                                   [..]
                                   RET -> back to tramp_addr
                [..]
                RET -> back to original_caller
```
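To make the "balanced RSB" point concrete, here is an annotated walk through the call/return pairs of the diagram above. This is an editorial sketch: the labels follow the diagram rather than real kernel symbols, and the depth column tracks return stack buffer entries.

```
call foo        # return into original_caller pushed          depth 1
call fentry     # return into foo pushed                       depth 2
RET             # one entry consumed; lands on tramp_addr,
                # which fentry wrote over the saved return     depth 1
call foo_body   # return into tramp_addr pushed                depth 2
RET             # back to tramp_addr                           depth 1
RET             # back to original_caller                      depth 0
```

Every CALL is eventually paired with a RET, so the RSB ends up depth-balanced; the retargeted RET out of fentry is the only return that does not go back to the address its CALL pushed.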
Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20251118123639.688444-3-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
| -rw-r--r-- | arch/x86/Kconfig | 1 |
|---|---|---|
| -rw-r--r-- | arch/x86/kernel/ftrace.c | 7 |
| -rw-r--r-- | arch/x86/kernel/ftrace_64.S | 12 |

3 files changed, 18 insertions, 2 deletions
```diff
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fa3b616af03a..462250a20311 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -230,6 +230,7 @@ config X86
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
 	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
 	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
 	select HAVE_EBPF_JIT
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4450acec9390..0543b57f54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	 * No need to translate into a callthunk. The trampoline does
 	 * the depth accounting itself.
 	 */
-	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	if (ftrace_is_jmp(addr)) {
+		addr = ftrace_jmp_get(addr);
+		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+	} else {
+		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	}
 }
 
 static int ftrace_verify_code(unsigned long ip, const char *old_code)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 823dbdd0eb41..a132608265f6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
 	RET
 
+1:
+	testb	$1, %al
+	jz	2f
+	andq	$0xfffffffffffffffe, %rax
+	movq	%rax, MCOUNT_REG_SIZE+8(%rsp)
+	restore_mcount_regs
+	/* Restore flags */
+	popfq
+	RET
+
 	/* Swap the flags with orig_rax */
-1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
+2:	movq MCOUNT_REG_SIZE(%rsp), %rdi
 	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
 	movq %rax, MCOUNT_REG_SIZE(%rsp)
 
```
