Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                    |  1
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  |  3
-rw-r--r--  arch/x86/kernel/ftrace.c            |  7
-rw-r--r--  arch/x86/kernel/ftrace_64.S         | 12
-rw-r--r--  arch/x86/net/bpf_jit_comp.c         | 97
5 files changed, 86 insertions(+), 34 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 34fb46d5341b..17a107cc5244 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -230,6 +230,7 @@ config X86
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
select HAVE_FTRACE_REGS_HAVING_PT_REGS if X86_64
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ select HAVE_DYNAMIC_FTRACE_WITH_JMP if X86_64
select HAVE_SAMPLE_FTRACE_DIRECT if X86_64
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
select HAVE_EBPF_JIT
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index bb6e2c47ffc6..48405e02d6e4 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -693,8 +693,7 @@ ctr_crypt(struct skcipher_request *req,
* operation into two at the point where the overflow
* will occur. After the first part, add the carry bit.
*/
- p1_nbytes = min_t(unsigned int, nbytes,
- (nblocks - ctr64) * AES_BLOCK_SIZE);
+ p1_nbytes = min(nbytes, (nblocks - ctr64) * AES_BLOCK_SIZE);
(*ctr64_func)(key, walk.src.virt.addr,
walk.dst.virt.addr, p1_nbytes, le_ctr);
le_ctr[0] = 0;
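
The min_t() -> min() change above drops a forced cast to unsigned int. For context, a minimal userspace-style sketch (values and the simplified macro are illustrative, not kernel code) of the truncation hazard such a cast can introduce when one operand is a 64-bit product; the kernel's min() instead insists that the operand types agree:

/* Illustration only: a simplified stand-in for the kernel's min_t(). */
#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int nbytes = 4096;                 /* assumed walk length */
	uint64_t part1 = (uint64_t)UINT32_MAX + 16; /* 64-bit byte count   */

	/* The cast truncates part1 to 15, so the "minimum" becomes 15
	 * instead of 4096. */
	printf("min_t: %u\n", min_t(unsigned int, nbytes, part1));
	return 0;
}
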
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4450acec9390..0543b57f54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
* No need to translate into a callthunk. The trampoline does
* the depth accounting itself.
*/
- return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+ if (ftrace_is_jmp(addr)) {
+ addr = ftrace_jmp_get(addr);
+ return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+ } else {
+ return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+ }
}
static int ftrace_verify_code(unsigned long ip, const char *old_code)
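
ftrace_call_replace() now emits either a call or a jmp, keyed off ftrace_is_jmp(addr). Those helpers are not part of this diff; below is a hedged sketch of an encoding that is consistent with the ftrace_64.S change further down, which tests and then clears bit 0 of the value in %rax (the real definitions live in the generic ftrace headers):

/* Sketch (assumption): the tail-jump variant is flagged in bit 0 of an
 * otherwise even destination address. */
#define FTRACE_JMP_FLAG		0x1UL

static inline bool ftrace_is_jmp(unsigned long addr)
{
	return addr & FTRACE_JMP_FLAG;
}

static inline unsigned long ftrace_jmp_get(unsigned long addr)
{
	return addr & ~FTRACE_JMP_FLAG;
}
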
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 823dbdd0eb41..a132608265f6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
RET
+1:
+ testb $1, %al
+ jz 2f
+ andq $0xfffffffffffffffe, %rax
+ movq %rax, MCOUNT_REG_SIZE+8(%rsp)
+ restore_mcount_regs
+ /* Restore flags */
+ popfq
+ RET
+
/* Swap the flags with orig_rax */
-1: movq MCOUNT_REG_SIZE(%rsp), %rdi
+2: movq MCOUNT_REG_SIZE(%rsp), %rdi
movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
movq %rax, MCOUNT_REG_SIZE(%rsp)
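
In C terms, the new exit path added to ftrace_regs_caller above does roughly the following; this is an illustrative model of the assembly (struct and names invented for the sketch), not code from the tree. It assumes MCOUNT_REG_SIZE(%rsp) holds the saved flags and MCOUNT_REG_SIZE+8(%rsp) the fentry return address, so overwriting the latter and returning turns the final RET into a tail jump that replaces the traced function instead of calling out and coming back:

#include <stdbool.h>
#include <stdint.h>

struct mcount_exit_frame {
	uint64_t flags;		/* MCOUNT_REG_SIZE(%rsp)   */
	uint64_t ret_ip;	/* MCOUNT_REG_SIZE+8(%rsp) */
};

/* Returns true when the destination in rax was tagged as a jump target. */
static bool take_jmp_exit(struct mcount_exit_frame *frame, uint64_t rax)
{
	if (!(rax & 1))
		return false;	/* fall through to the call path at 2: */

	/* Clear the tag and make the final RET land in the destination. */
	frame->ret_ip = rax & ~1ULL;
	return true;
}
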
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index de5083cb1d37..b69dc7194e2c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -597,7 +597,8 @@ static int emit_jump(u8 **pprog, void *func, void *ip)
return emit_patch(pprog, func, ip, 0xE9);
}
-static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t,
void *old_addr, void *new_addr)
{
const u8 *nop_insn = x86_nops[5];
@@ -607,9 +608,9 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
int ret;
memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
- if (old_addr) {
+ if (old_t != BPF_MOD_NOP && old_addr) {
prog = old_insn;
- ret = t == BPF_MOD_CALL ?
+ ret = old_t == BPF_MOD_CALL ?
emit_call(&prog, old_addr, ip) :
emit_jump(&prog, old_addr, ip);
if (ret)
@@ -617,9 +618,9 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
}
memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
- if (new_addr) {
+ if (new_t != BPF_MOD_NOP && new_addr) {
prog = new_insn;
- ret = t == BPF_MOD_CALL ?
+ ret = new_t == BPF_MOD_CALL ?
emit_call(&prog, new_addr, ip) :
emit_jump(&prog, new_addr, ip);
if (ret)
@@ -640,8 +641,9 @@ out:
return ret;
}
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
- void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t, void *old_addr,
+ void *new_addr)
{
if (!is_kernel_text((long)ip) &&
!is_bpf_text_address((long)ip))
@@ -655,29 +657,43 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
if (is_endbr(ip))
ip += ENDBR_INSN_SIZE;
- return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
+ return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
}
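
With the old_t/new_t split, every poke now states both what is expected at the site and what should replace it, and BPF_MOD_NOP means "a 5-byte nop on this side". A sketch (not from the tree) of the resulting call pattern, matching how the tail-call fixups below use it:

/* Sketch: arm a tail-call jump over the JIT's 5-byte nop, later disarm it. */
static int arm_and_disarm(void *ip, void *target)
{
	int err;

	/* nop -> jmp: nothing is expected at ip yet. */
	err = bpf_arch_text_poke(ip, BPF_MOD_NOP, BPF_MOD_JUMP, NULL, target);
	if (err)
		return err;

	/* jmp -> nop: the existing jump to target is verified, then removed. */
	return bpf_arch_text_poke(ip, BPF_MOD_JUMP, BPF_MOD_NOP, target, NULL);
}
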
#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
-static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
+static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
{
u8 *prog = *pprog;
+ if (ereg)
+ EMIT1(0x41);
+
+ EMIT2(0xFF, 0xE0 + reg);
+
+ *pprog = prog;
+}
+
+static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
+{
+ u8 *prog = *pprog;
+ int reg = reg2hex[bpf_reg];
+ bool ereg = is_ereg(bpf_reg);
+
if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
OPTIMIZER_HIDE_VAR(reg);
- emit_jump(&prog, its_static_thunk(reg), ip);
+ emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
- EMIT2(0xFF, 0xE0 + reg);
+ __emit_indirect_jump(&prog, reg, ereg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
OPTIMIZER_HIDE_VAR(reg);
if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
- emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
+ emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
else
- emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
+ emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
} else {
- EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
+ __emit_indirect_jump(&prog, reg, ereg);
if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
EMIT1(0xCC); /* int3 */
}
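
emit_indirect_jump() now takes a BPF register number and resolves it through reg2hex[] and is_ereg(), so a target held in r8-r15 gets the REX.B prefix and the thunk index is offset by 8. A sketch of the bytes __emit_indirect_jump() produces for the plain jmp *%reg case (encodings per the x86 instruction set reference; example registers follow the mapping noted in the diff, e.g. R4 -> rcx):

/* Illustration only:
 *   jmp *%rcx  ->     ff e1   (reghex 1, no prefix)
 *   jmp *%r9   ->  41 ff e1   (reghex 1, REX.B selects r8-r15)
 */
static int sketch_indirect_jump(unsigned char *buf, int reghex, int ereg)
{
	int i = 0;

	if (ereg)
		buf[i++] = 0x41;		/* REX.B prefix        */
	buf[i++] = 0xFF;			/* opcode group 5      */
	buf[i++] = 0xE0 + reghex;		/* ModRM /4: jmp r/m64 */

	return i;				/* bytes emitted       */
}
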
@@ -797,7 +813,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
* rdi == ctx (1st arg)
* rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
*/
- emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
+ emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
/* out: */
ctx->tail_call_indirect_label = prog - start;
@@ -883,12 +899,13 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
target = array->ptrs[poke->tail_call.key];
if (target) {
ret = __bpf_arch_text_poke(poke->tailcall_target,
- BPF_MOD_JUMP, NULL,
+ BPF_MOD_NOP, BPF_MOD_JUMP,
+ NULL,
(u8 *)target->bpf_func +
poke->adj_off);
BUG_ON(ret < 0);
ret = __bpf_arch_text_poke(poke->tailcall_bypass,
- BPF_MOD_JUMP,
+ BPF_MOD_JUMP, BPF_MOD_NOP,
(u8 *)poke->tailcall_target +
X86_PATCH_SIZE, NULL);
BUG_ON(ret < 0);
@@ -2614,6 +2631,9 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
break;
+ case BPF_JMP | BPF_JA | BPF_X:
+ emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
+ break;
case BPF_JMP | BPF_JA:
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(insn->code) == BPF_JMP) {
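
The new case above lowers an indirect jump whose target lives in a register; the destination register field of the instruction selects it. A minimal illustration of such an instruction at the bpf_insn level (the register choice is arbitrary):

/* Illustration only: class BPF_JMP, opcode BPF_JA, source BPF_X --
 * jump to the address currently held in dst_reg. */
const struct bpf_insn indirect_goto = {
	.code    = BPF_JMP | BPF_JA | BPF_X,
	.dst_reg = BPF_REG_2,
	.src_reg = 0,
	.off     = 0,
	.imm     = 0,
};
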
@@ -2830,9 +2850,10 @@ static int get_nr_used_regs(const struct btf_func_model *m)
}
static void save_args(const struct btf_func_model *m, u8 **prog,
- int stack_size, bool for_call_origin)
+ int stack_size, bool for_call_origin, u32 flags)
{
int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
+ bool use_jmp = bpf_trampoline_use_jmp(flags);
int i, j;
/* Store function arguments to stack.
@@ -2873,7 +2894,7 @@ static void save_args(const struct btf_func_model *m, u8 **prog,
*/
for (j = 0; j < arg_regs; j++) {
emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
- nr_stack_slots * 8 + 0x18);
+ nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
-stack_size);
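
The displacement of the stack-argument load above changes from a fixed 0x18 to 16 + (!use_jmp) * 8. A hedged sketch of the frame layout assumed here: relative to the trampoline's rbp, a call-entered frame is [saved rbp][fentry return ip][caller's return ip][stack args ...], so stack arguments start at rbp + 24 (0x18); entered via jmp there is no fentry return ip and they start at rbp + 16:

/* Sketch (helper name invented for illustration). */
static int stack_arg_base(bool use_jmp)
{
	return 16 + (!use_jmp) * 8;	/* 0x18 when entered via call */
}
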
@@ -3267,12 +3288,17 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* should be 16-byte aligned. Following code depend on
* that stack_size is already 8-byte aligned.
*/
- stack_size += (stack_size % 16) ? 0 : 8;
+ if (bpf_trampoline_use_jmp(flags)) {
+ /* no rip in the "jmp" case */
+ stack_size += (stack_size % 16) ? 8 : 0;
+ } else {
+ stack_size += (stack_size % 16) ? 0 : 8;
+ }
}
arg_stack_off = stack_size;
- if (flags & BPF_TRAMP_F_SKIP_FRAME) {
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* skip patched call instruction and point orig_call to actual
* body of the kernel function.
*/
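
The flipped padding condition earlier in this hunk keeps the trampoline's stack 16-byte aligned for both entry modes: stack_size is already 8-byte aligned at this point, and the missing return address in the jmp case inverts which parity needs the extra 8 bytes. A small sketch of the rule (helper name is illustrative):

/* Sketch: pad an 8-byte-aligned stack_size so rsp is 16-byte aligned at
 * call sites inside the trampoline.  A call-entered trampoline already
 * has a return address offsetting rsp by 8; a jmp-entered one does not. */
static int align_trampoline_stack(int stack_size, bool use_jmp)
{
	if (use_jmp)
		return stack_size + ((stack_size % 16) ? 8 : 0);

	return stack_size + ((stack_size % 16) ? 0 : 8);
}
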
@@ -3327,7 +3353,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
}
- save_args(m, &prog, regs_off, false);
+ save_args(m, &prog, regs_off, false, flags);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
@@ -3360,7 +3386,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
if (flags & BPF_TRAMP_F_CALL_ORIG) {
restore_regs(m, &prog, regs_off);
- save_args(m, &prog, arg_stack_off, true);
+ save_args(m, &prog, arg_stack_off, true, flags);
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
/* Before calling the original function, load the
@@ -3543,7 +3569,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image,
if (err)
return err;
- emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
+ emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
*pprog = prog;
return 0;
@@ -3827,6 +3853,15 @@ out_image:
jit_data->header = header;
jit_data->rw_header = rw_header;
}
+
+ /*
+ * The bpf_prog_update_insn_ptrs function expects addrs to
+ * point to the first byte of the jitted instruction (unlike
+ * the bpf_prog_fill_jited_linfo below, which, for historical
+ * reasons, expects them to point to the next instruction).
+ */
+ bpf_prog_update_insn_ptrs(prog, addrs, image);
+
/*
* ctx.prog_offset is used when CFI preambles put code *before*
* the function. See emit_cfi(). For FineIBT specifically this code
@@ -3953,6 +3988,7 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
struct bpf_prog *new, struct bpf_prog *old)
{
u8 *old_addr, *new_addr, *old_bypass_addr;
+ enum bpf_text_poke_type t;
int ret;
old_bypass_addr = old ? NULL : poke->bypass_addr;
@@ -3965,21 +4001,22 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
* the kallsyms check.
*/
if (new) {
+ t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
ret = __bpf_arch_text_poke(poke->tailcall_target,
- BPF_MOD_JUMP,
+ t, BPF_MOD_JUMP,
old_addr, new_addr);
BUG_ON(ret < 0);
if (!old) {
ret = __bpf_arch_text_poke(poke->tailcall_bypass,
- BPF_MOD_JUMP,
+ BPF_MOD_JUMP, BPF_MOD_NOP,
poke->bypass_addr,
NULL);
BUG_ON(ret < 0);
}
} else {
+ t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
ret = __bpf_arch_text_poke(poke->tailcall_bypass,
- BPF_MOD_JUMP,
- old_bypass_addr,
+ t, BPF_MOD_JUMP, old_bypass_addr,
poke->bypass_addr);
BUG_ON(ret < 0);
/* let other CPUs finish the execution of program
@@ -3988,9 +4025,9 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
*/
if (!ret)
synchronize_rcu();
+ t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
ret = __bpf_arch_text_poke(poke->tailcall_target,
- BPF_MOD_JUMP,
- old_addr, NULL);
+ t, BPF_MOD_NOP, old_addr, NULL);
BUG_ON(ret < 0);
}
}
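
Taken together, bpf_arch_poke_desc_update() now spells out both sides of every patch. A hedged summary of the transitions performed by the branches above, for the typical case where the removed program actually existed:

/*
 * tailcall_target / tailcall_bypass transitions (sketch):
 *
 *   new prog, old prog present : target  JMP -> JMP            (retarget)
 *   new prog, no old prog      : target  NOP -> JMP, bypass JMP -> NOP
 *   prog removed (new == NULL) : bypass  NOP -> JMP, synchronize_rcu(),
 *                                target  JMP -> NOP
 */
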