author     Jakub Kicinski <kuba@kernel.org>    2023-02-20 15:38:41 -0800
committer  Jakub Kicinski <kuba@kernel.org>    2023-02-20 16:31:14 -0800
commit     ee8d72a157ebb4b8c4b8b664f5a78a341fede2ef (patch)
tree       4c6abf1db03b7bffaf8d9a41ecda74b9041312fb /arch
parent     01bb11ad828b320749764fa93ad078db20d08a9e (diff)
parent     168de0233586fb06c5c5c56304aa9a928a09b0ba (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2023-02-17
We've added 64 non-merge commits during the last 7 day(s) which contain
a total of 158 files changed, 4190 insertions(+), 988 deletions(-).
The main changes are:
1) Add an rbtree data structure following the "next-gen data structure"
precedent set by the recently added linked list, that is, using
kfuncs + kptrs instead of adding a new BPF map type, from Dave Marchevsky.
A usage sketch follows this list.
2) Add a new benchmark for hashmap lookups to BPF selftests,
from Anton Protopopov.
3) Fix bpf_fib_lookup to only return valid neighbors and add an option
to skip the neigh table lookup, from Martin KaFai Lau. A sketch of the
new flag also follows this list.
4) Add the cgroup.memory=nobpf kernel parameter option to disable BPF memory
accounting for container environments, from Yafang Shao.
5) Batch of ice multi-buffer and driver performance fixes,
from Alexander Lobakin.
6) Fix a bug in determining whether a global subprog's argument is
PTR_TO_CTX; the check is based on type names, which broke kprobe progs,
from Andrii Nakryiko.
7) Prep work for the future -mcpu=v4 LLVM option, which includes usage of
the BPF_ST insn; improve BPF_ST-related value tracking in the verifier
accordingly, from Eduard Zingerman.
8) More prep work for later building selftests with Memory Sanitizer
in order to detect uses of undefined memory, from Ilya Leoshkevich.
9) Fix xsk sockets to check IFF_UP earlier to avoid a NULL pointer
dereference via sendmsg(), from Maciej Fijalkowski.
10) Implement BPF trampoline for RV64 JIT compiler, from Pu Lehui.
11) Fix the BPF memory allocator in combination with the BPF hashtab, where
it could corrupt special fields, e.g. those used by bpf_spin_lock,
from Hou Tao.
12) Fix the LoongArch BPF JIT to always use 4 instructions for the function
address, so that instruction sequences don't change between JIT passes,
from Hengqi Chen. A worked example of the address split follows this list.
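
As a usage sketch for the rbtree in item 1: the structure is driven entirely
from the BPF program side via kfuncs and kptrs rather than map operations.
The snippet below follows the shape of the bpf-next selftests and the
bpf_experimental.h declarations (bpf_obj_new(), __contains(), bpf_rbtree_add());
exact names and verifier constraints are best taken from the selftests
themselves, so treat this as an illustration only.

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"	/* bpf_obj_new(), __contains(), rbtree kfuncs */

  /* Defined locally for self-containment. */
  #ifndef container_of
  #define container_of(ptr, type, member) \
  	((type *)((char *)(ptr) - __builtin_offsetof(type, member)))
  #endif

  struct node_data {
  	long key;
  	long data;
  	struct bpf_rb_node node;
  };

  /* Root and lock must live in the same section, as in the selftests. */
  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

  private(A) struct bpf_spin_lock glock;
  private(A) struct bpf_rb_root groot __contains(node_data, node);

  /* Ordering callback passed to bpf_rbtree_add(). */
  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
  {
  	struct node_data *node_a = container_of(a, struct node_data, node);
  	struct node_data *node_b = container_of(b, struct node_data, node);

  	return node_a->key < node_b->key;
  }

  SEC("tc")
  long add_one_node(void *ctx)
  {
  	struct node_data *n;

  	n = bpf_obj_new(typeof(*n));	/* allocate a kptr-managed node */
  	if (!n)
  		return 1;
  	n->key = 5;

  	bpf_spin_lock(&glock);		/* rbtree ops must run under the lock */
  	bpf_rbtree_add(&groot, &n->node, less);
  	bpf_spin_unlock(&glock);
  	return 0;
  }

  char _license[] SEC("license") = "GPL";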
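
For item 3, a minimal tc-program sketch of the new flag, assuming a uapi
linux/bpf.h that already carries BPF_FIB_LOOKUP_SKIP_NEIGH. The intended
pairing is with bpf_redirect_neigh(), which resolves L2 addresses on its own,
so the fib helper no longer has to hand back a neighbour entry; the field
setup here is abbreviated and illustrative.

  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>

  #ifndef AF_INET
  #define AF_INET 2
  #endif

  SEC("tc")
  int fib_skip_neigh(struct __sk_buff *skb)
  {
  	struct bpf_fib_lookup params = {};
  	long rc;

  	params.family  = AF_INET;
  	params.ifindex = skb->ingress_ifindex;
  	/* ... fill ipv4_src/ipv4_dst etc. from the packet as usual ... */

  	/* Route lookup only; skip the neighbour table entirely. */
  	rc = bpf_fib_lookup(skb, &params, sizeof(params),
  			    BPF_FIB_LOOKUP_SKIP_NEIGH);
  	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
  		/* dmac/smac are not filled in; let bpf_redirect_neigh()
  		 * do the L2 resolution on the egress device instead.
  		 */
  		return bpf_redirect_neigh(params.ifindex, NULL, 0, 0);

  	return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";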
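
The motivation in item 12 is that move_imm() emits a variable-length sequence
depending on the immediate's value, so a function address that differs between
JIT passes can change the image layout. The new move_addr() (visible in the
diff below) always emits lu12i.w + ori + lu32i.d + lu52i.d. A small userspace
illustration of the same field split, mirroring the masks in move_addr(); the
example address is arbitrary.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uint64_t addr = 0x9000000002345678ULL;	/* arbitrary example address */

  	uint64_t imm_31_12 = (addr >> 12) & 0xfffff; /* lu12i.w rd, imm      */
  	uint64_t imm_11_0  = addr & 0xfff;           /* ori     rd, rd, imm  */
  	uint64_t imm_51_32 = (addr >> 32) & 0xfffff; /* lu32i.d rd, imm      */
  	uint64_t imm_63_52 = (addr >> 52) & 0xfff;   /* lu52i.d rd, rd, imm  */

  	printf("lu12i.w: %#llx\n", (unsigned long long)imm_31_12);
  	printf("ori:     %#llx\n", (unsigned long long)imm_11_0);
  	printf("lu32i.d: %#llx\n", (unsigned long long)imm_51_32);
  	printf("lu52i.d: %#llx\n", (unsigned long long)imm_63_52);
  	return 0;
  }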
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (64 commits)
selftests/bpf: Add bpf_fib_lookup test
bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup
riscv, bpf: Add bpf trampoline support for RV64
riscv, bpf: Add bpf_arch_text_poke support for RV64
riscv, bpf: Factor out emit_call for kernel and bpf context
riscv: Extend patch_text for multiple instructions
Revert "bpf, test_run: fix &xdp_frame misplacement for LIVE_FRAMES"
selftests/bpf: Add global subprog context passing tests
selftests/bpf: Convert test_global_funcs test to test_loader framework
bpf: Fix global subprog context argument resolution logic
LoongArch, bpf: Use 4 instructions for function address in JIT
bpf: bpf_fib_lookup should not return neigh in NUD_FAILED state
bpf: Disable bh in bpf_test_run for xdp and tc prog
xsk: check IFF_UP earlier in Tx path
Fix typos in selftest/bpf files
selftests/bpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
samples/bpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
bpftool: Use bpf_{btf,link,map,prog}_get_info_by_fd()
libbpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
libbpf: Introduce bpf_{btf,link,map,prog}_get_info_by_fd()
...
====================
Link: https://lore.kernel.org/r/20230217221737.31122-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/loongarch/net/bpf_jit.c        |   2
-rw-r--r--  arch/loongarch/net/bpf_jit.h        |  21
-rw-r--r--  arch/riscv/include/asm/patch.h      |   2
-rw-r--r--  arch/riscv/kernel/patch.c           |  19
-rw-r--r--  arch/riscv/kernel/probes/kprobes.c  |  15
-rw-r--r--  arch/riscv/net/bpf_jit.h            |   5
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c     | 435
7 files changed, 464 insertions(+), 35 deletions(-)
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index c4b1947ebf76..288003a9f0ca 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -841,7 +841,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext if (ret < 0) return ret; - move_imm(ctx, t1, func_addr, is32); + move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0); move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); break; diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index ca708024fdd3..c335dc4eed37 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -82,6 +82,27 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo emit_insn(ctx, addiw, reg, reg, 0); } +static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr) +{ + u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52; + + /* lu12iw rd, imm_31_12 */ + imm_31_12 = (addr >> 12) & 0xfffff; + emit_insn(ctx, lu12iw, rd, imm_31_12); + + /* ori rd, rd, imm_11_0 */ + imm_11_0 = addr & 0xfff; + emit_insn(ctx, ori, rd, rd, imm_11_0); + + /* lu32id rd, imm_51_32 */ + imm_51_32 = (addr >> 32) & 0xfffff; + emit_insn(ctx, lu32id, rd, imm_51_32); + + /* lu52id rd, rd, imm_63_52 */ + imm_63_52 = (addr >> 52) & 0xfff; + emit_insn(ctx, lu52id, rd, rd, imm_63_52); +} + static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32) { long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31; diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h index 9a7d7346001e..f433121774c0 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/patch.h @@ -7,6 +7,6 @@ #define _ASM_RISCV_PATCH_H int patch_text_nosync(void *addr, const void *insns, size_t len); -int patch_text(void *addr, u32 insn); +int patch_text(void *addr, u32 *insns, int ninsns); #endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 765004b60513..8086d1a281cd 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -15,7 +15,8 @@ struct patch_insn { void *addr; - u32 insn; + u32 *insns; + int ninsns; atomic_t cpu_count; }; @@ -102,12 +103,15 @@ NOKPROBE_SYMBOL(patch_text_nosync); static int patch_text_cb(void *data) { struct patch_insn *patch = data; - int ret = 0; + unsigned long len; + int i, ret = 0; if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { - ret = - patch_text_nosync(patch->addr, &patch->insn, - GET_INSN_LENGTH(patch->insn)); + for (i = 0; ret == 0 && i < patch->ninsns; i++) { + len = GET_INSN_LENGTH(patch->insns[i]); + ret = patch_text_nosync(patch->addr + i * len, + &patch->insns[i], len); + } atomic_inc(&patch->cpu_count); } else { while (atomic_read(&patch->cpu_count) <= num_online_cpus()) @@ -119,11 +123,12 @@ static int patch_text_cb(void *data) } NOKPROBE_SYMBOL(patch_text_cb); -int patch_text(void *addr, u32 insn) +int patch_text(void *addr, u32 *insns, int ninsns) { struct patch_insn patch = { .addr = addr, - .insn = insn, + .insns = insns, + .ninsns = ninsns, .cpu_count = ATOMIC_INIT(0), }; diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 2bedec37d092..2f08c14a933d 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -23,13 +23,14 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { + u32 
insn = __BUG_INSN_32; unsigned long offset = GET_INSN_LENGTH(p->opcode); p->ainsn.api.restore = (unsigned long)p->addr + offset; - patch_text(p->ainsn.api.insn, p->opcode); + patch_text(p->ainsn.api.insn, &p->opcode, 1); patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), - __BUG_INSN_32); + &insn, 1); } static void __kprobes arch_prepare_simulate(struct kprobe *p) @@ -116,16 +117,16 @@ void *alloc_insn_page(void) /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { - if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) - patch_text(p->addr, __BUG_INSN_32); - else - patch_text(p->addr, __BUG_INSN_16); + u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ? + __BUG_INSN_32 : __BUG_INSN_16; + + patch_text(p->addr, &insn, 1); } /* remove breakpoint from text */ void __kprobes arch_disarm_kprobe(struct kprobe *p) { - patch_text(p->addr, p->opcode); + patch_text(p->addr, &p->opcode, 1); } void __kprobes arch_remove_kprobe(struct kprobe *p) diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index d926e0f7ef57..bf9802a63061 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -573,6 +573,11 @@ static inline u32 rv_fence(u8 pred, u8 succ) return rv_i_insn(imm11_0, 0, 0, 0, 0xf); } +static inline u32 rv_nop(void) +{ + return rv_i_insn(0, 0, 0, 0, 0x13); +} + /* RVC instrutions. */ static inline u16 rvc_addi4spn(u8 rd, u32 imm10) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index f2417ac54edd..f5a668736c79 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -8,6 +8,8 @@ #include <linux/bitfield.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/memory.h> +#include <linux/stop_machine.h> #include "bpf_jit.h" #define RV_REG_TCC RV_REG_A6 @@ -238,7 +240,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) if (!is_tail_call) emit_mv(RV_REG_A0, RV_REG_A5, ctx); emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, - is_tail_call ? 4 : 0, /* skip TCC init */ + is_tail_call ? 
20 : 0, /* skip reserved nops and TCC init */ ctx); } @@ -428,12 +430,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) *rd = RV_REG_T2; } -static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr, +static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr, struct rv_jit_context *ctx) { s64 upper, lower; - if (rvoff && is_21b_int(rvoff) && !force_jalr) { + if (rvoff && fixed_addr && is_21b_int(rvoff)) { emit(rv_jal(rd, rvoff >> 1), ctx); return 0; } else if (in_auipc_jalr_range(rvoff)) { @@ -454,24 +456,17 @@ static bool is_signed_bpf_cond(u8 cond) cond == BPF_JSGE || cond == BPF_JSLE; } -static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) +static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx) { s64 off = 0; u64 ip; - u8 rd; - int ret; if (addr && ctx->insns) { ip = (u64)(long)(ctx->insns + ctx->ninsns); off = addr - ip; } - ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx); - if (ret) - return ret; - rd = bpf_to_rv_reg(BPF_REG_0, ctx); - emit_mv(rd, RV_REG_A0, ctx); - return 0; + return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx); } static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, @@ -622,6 +617,401 @@ static int add_exception_handler(const struct bpf_insn *insn, return 0; } +static int gen_call_or_nops(void *target, void *ip, u32 *insns) +{ + s64 rvoff; + int i, ret; + struct rv_jit_context ctx; + + ctx.ninsns = 0; + ctx.insns = (u16 *)insns; + + if (!target) { + for (i = 0; i < 4; i++) + emit(rv_nop(), &ctx); + return 0; + } + + rvoff = (s64)(target - (ip + 4)); + emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx); + ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx); + if (ret) + return ret; + emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx); + + return 0; +} + +static int gen_jump_or_nops(void *target, void *ip, u32 *insns) +{ + s64 rvoff; + struct rv_jit_context ctx; + + ctx.ninsns = 0; + ctx.insns = (u16 *)insns; + + if (!target) { + emit(rv_nop(), &ctx); + emit(rv_nop(), &ctx); + return 0; + } + + rvoff = (s64)(target - ip); + return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx); +} + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + void *old_addr, void *new_addr) +{ + u32 old_insns[4], new_insns[4]; + bool is_call = poke_type == BPF_MOD_CALL; + int (*gen_insns)(void *target, void *ip, u32 *insns); + int ninsns = is_call ? 4 : 2; + int ret; + + if (!is_bpf_text_address((unsigned long)ip)) + return -ENOTSUPP; + + gen_insns = is_call ? 
gen_call_or_nops : gen_jump_or_nops; + + ret = gen_insns(old_addr, ip, old_insns); + if (ret) + return ret; + + if (memcmp(ip, old_insns, ninsns * 4)) + return -EFAULT; + + ret = gen_insns(new_addr, ip, new_insns); + if (ret) + return ret; + + cpus_read_lock(); + mutex_lock(&text_mutex); + if (memcmp(ip, new_insns, ninsns * 4)) + ret = patch_text(ip, new_insns, ninsns); + mutex_unlock(&text_mutex); + cpus_read_unlock(); + + return ret; +} + +static void store_args(int nregs, int args_off, struct rv_jit_context *ctx) +{ + int i; + + for (i = 0; i < nregs; i++) { + emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx); + args_off -= 8; + } +} + +static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx) +{ + int i; + + for (i = 0; i < nregs; i++) { + emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx); + args_off -= 8; + } +} + +static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off, + int run_ctx_off, bool save_ret, struct rv_jit_context *ctx) +{ + int ret, branch_off; + struct bpf_prog *p = l->link.prog; + int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); + + if (l->cookie) { + emit_imm(RV_REG_T1, l->cookie, ctx); + emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx); + } else { + emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx); + } + + /* arg1: prog */ + emit_imm(RV_REG_A0, (const s64)p, ctx); + /* arg2: &run_ctx */ + emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx); + ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx); + if (ret) + return ret; + + /* if (__bpf_prog_enter(prog) == 0) + * goto skip_exec_of_prog; + */ + branch_off = ctx->ninsns; + /* nop reserved for conditional jump */ + emit(rv_nop(), ctx); + + /* store prog start time */ + emit_mv(RV_REG_S1, RV_REG_A0, ctx); + + /* arg1: &args_off */ + emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx); + if (!p->jited) + /* arg2: progs[i]->insnsi for interpreter */ + emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx); + ret = emit_call((const u64)p->bpf_func, true, ctx); + if (ret) + return ret; + + if (save_ret) + emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx); + + /* update branch with beqz */ + if (ctx->insns) { + int offset = ninsns_rvoff(ctx->ninsns - branch_off); + u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1); + *(u32 *)(ctx->insns + branch_off) = insn; + } + + /* arg1: prog */ + emit_imm(RV_REG_A0, (const s64)p, ctx); + /* arg2: prog start time */ + emit_mv(RV_REG_A1, RV_REG_S1, ctx); + /* arg3: &run_ctx */ + emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx); + ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx); + + return ret; +} + +static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, + const struct btf_func_model *m, + struct bpf_tramp_links *tlinks, + void *func_addr, u32 flags, + struct rv_jit_context *ctx) +{ + int i, ret, offset; + int *branches_off = NULL; + int stack_size = 0, nregs = m->nr_args; + int retaddr_off, fp_off, retval_off, args_off; + int nregs_off, ip_off, run_ctx_off, sreg_off; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; + void *orig_call = func_addr; + bool save_ret; + u32 insn; + + /* Generated trampoline stack layout: + * + * FP - 8 [ RA of parent func ] return address of parent + * function + * FP - retaddr_off [ RA of traced func ] return address of traced + * function + * FP - fp_off [ FP of parent func ] + * + * FP - 
retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or + * BPF_TRAMP_F_RET_FENTRY_RET + * [ argN ] + * [ ... ] + * FP - args_off [ arg1 ] + * + * FP - nregs_off [ regs count ] + * + * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG + * + * FP - run_ctx_off [ bpf_tramp_run_ctx ] + * + * FP - sreg_off [ callee saved reg ] + * + * [ pads ] pads for 16 bytes alignment + */ + + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) + return -ENOTSUPP; + + /* extra regiters for struct arguments */ + for (i = 0; i < m->nr_args; i++) + if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) + nregs += round_up(m->arg_size[i], 8) / 8 - 1; + + /* 8 arguments passed by registers */ + if (nregs > 8) + return -ENOTSUPP; + + /* room for parent function return address */ + stack_size += 8; + + stack_size += 8; + retaddr_off = stack_size; + + stack_size += 8; + fp_off = stack_size; + + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); + if (save_ret) { + stack_size += 8; + retval_off = stack_size; + } + + stack_size += nregs * 8; + args_off = stack_size; + + stack_size += 8; + nregs_off = stack_size; + + if (flags & BPF_TRAMP_F_IP_ARG) { + stack_size += 8; + ip_off = stack_size; + } + + stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8); + run_ctx_off = stack_size; + + stack_size += 8; + sreg_off = stack_size; + + stack_size = round_up(stack_size, 16); + + emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx); + + emit_sd(RV_REG_SP, stack_size - retaddr_off, RV_REG_RA, ctx); + emit_sd(RV_REG_SP, stack_size - fp_off, RV_REG_FP, ctx); + + emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx); + + /* callee saved register S1 to pass start time */ + emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx); + + /* store ip address of the traced function */ + if (flags & BPF_TRAMP_F_IP_ARG) { + emit_imm(RV_REG_T1, (const s64)func_addr, ctx); + emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx); + } + + emit_li(RV_REG_T1, nregs, ctx); + emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx); + + store_args(nregs, args_off, ctx); + + /* skip to actual body of traced function */ + if (flags & BPF_TRAMP_F_SKIP_FRAME) + orig_call += 16; + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + emit_imm(RV_REG_A0, (const s64)im, ctx); + ret = emit_call((const u64)__bpf_tramp_enter, true, ctx); + if (ret) + return ret; + } + + for (i = 0; i < fentry->nr_links; i++) { + ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off, + flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx); + if (ret) + return ret; + } + + if (fmod_ret->nr_links) { + branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL); + if (!branches_off) + return -ENOMEM; + + /* cleanup to avoid garbage return value confusion */ + emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx); + for (i = 0; i < fmod_ret->nr_links; i++) { + ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off, + run_ctx_off, true, ctx); + if (ret) + goto out; + emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx); + branches_off[i] = ctx->ninsns; + /* nop reserved for conditional jump */ + emit(rv_nop(), ctx); + } + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + restore_args(nregs, args_off, ctx); + ret = emit_call((const u64)orig_call, true, ctx); + if (ret) + goto out; + emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx); + im->ip_after_call = ctx->insns + ctx->ninsns; + /* 2 nops reserved for auipc+jalr pair */ + emit(rv_nop(), ctx); + emit(rv_nop(), ctx); + } + + /* update branches saved in invoke_bpf_mod_ret with bnez */ + for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) { + 
offset = ninsns_rvoff(ctx->ninsns - branches_off[i]); + insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1); + *(u32 *)(ctx->insns + branches_off[i]) = insn; + } + + for (i = 0; i < fexit->nr_links; i++) { + ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off, + run_ctx_off, false, ctx); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = ctx->insns + ctx->ninsns; + emit_imm(RV_REG_A0, (const s64)im, ctx); + ret = emit_call((const u64)__bpf_tramp_exit, true, ctx); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + restore_args(nregs, args_off, ctx); + + if (save_ret) + emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx); + + emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx); + + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* return address of parent function */ + emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx); + else + /* return address of traced function */ + emit_ld(RV_REG_RA, stack_size - retaddr_off, RV_REG_SP, ctx); + + emit_ld(RV_REG_FP, stack_size - fp_off, RV_REG_SP, ctx); + emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx); + + emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx); + + ret = ctx->ninsns; +out: + kfree(branches_off); + return ret; +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, + void *image_end, const struct btf_func_model *m, + u32 flags, struct bpf_tramp_links *tlinks, + void *func_addr) +{ + int ret; + struct rv_jit_context ctx; + + ctx.ninsns = 0; + ctx.insns = NULL; + ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); + if (ret < 0) + return ret; + + if (ninsns_rvoff(ret) > (long)image_end - (long)image) + return -EFBIG; + + ctx.ninsns = 0; + ctx.insns = image; + ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); + if (ret < 0) + return ret; + + bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns); + + return ninsns_rvoff(ret); +} + int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, bool extra_pass) { @@ -913,7 +1303,7 @@ out_be: /* JUMP off */ case BPF_JMP | BPF_JA: rvoff = rv_offset(i, off, ctx); - ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); + ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); if (ret) return ret; break; @@ -1032,17 +1422,20 @@ out_be: /* function call */ case BPF_JMP | BPF_CALL: { - bool fixed; + bool fixed_addr; u64 addr; mark_call(ctx); - ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, - &fixed); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, + &addr, &fixed_addr); if (ret < 0) return ret; - ret = emit_call(fixed, addr, ctx); + + ret = emit_call(addr, fixed_addr, ctx); if (ret) return ret; + + emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx); break; } /* tail call */ @@ -1057,7 +1450,7 @@ out_be: break; rvoff = epilogue_offset(ctx); - ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); + ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); if (ret) return ret; break; @@ -1270,7 +1663,7 @@ out_be: void bpf_jit_build_prologue(struct rv_jit_context *ctx) { - int stack_adjust = 0, store_offset, bpf_stack_adjust; + int i, stack_adjust = 0, store_offset, bpf_stack_adjust; bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); if (bpf_stack_adjust) @@ -1297,6 +1690,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx) store_offset = stack_adjust - 8; + /* reserve 4 nop insns */ + for (i = 0; i < 4; i++) + emit(rv_nop(), ctx); + /* First instruction is always setting the tail-call-counter * (TCC) register. 
This instruction is skipped for tail calls. * Force using a 4-byte (non-compressed) instruction.
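
To make the RISC-V patch_text() change in the diff above concrete: it now
takes an array of instructions plus a count instead of a single u32, so
sequences such as the auipc+jalr pair written by bpf_arch_text_poke() can be
patched in one stop_machine() round. Below is a minimal sketch of the new
calling convention, using the same locking as bpf_arch_text_poke() above;
install_two_breaks() and its address argument are placeholders, not code from
the series.

  #include <linux/cpu.h>	/* cpus_read_lock() */
  #include <linux/memory.h>	/* text_mutex */
  #include <linux/mutex.h>
  #include <linux/kernel.h>	/* ARRAY_SIZE() */
  #include <asm/bug.h>		/* __BUG_INSN_32 */
  #include <asm/patch.h>	/* patch_text() */

  /*
   * Sketch only: write two 32-bit breakpoint instructions back to back.
   * patch_text() walks the array, advancing by GET_INSN_LENGTH() of each
   * entry, and serializes all CPUs while patching.  "addr" is assumed to
   * point into patchable kernel text.
   */
  static int __maybe_unused install_two_breaks(void *addr)
  {
  	u32 insns[2] = { __BUG_INSN_32, __BUG_INSN_32 };
  	int ret;

  	/* Same locking order as bpf_arch_text_poke() in the diff above. */
  	cpus_read_lock();
  	mutex_lock(&text_mutex);
  	ret = patch_text(addr, insns, ARRAY_SIZE(insns));
  	mutex_unlock(&text_mutex);
  	cpus_read_unlock();

  	return ret;
  }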