From 118ae46b794271ebcfcc9bab95e1c766198c8209 Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Wed, 7 May 2025 03:42:55 +0000
Subject: bpf, riscv64: Introduce emit_load_*() and emit_store_*()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We're planning to add support for the load-acquire and store-release
BPF instructions. Define emit_load_*() and emit_store_*() to
enable/facilitate the (re)use of their code.

Acked-by: Björn Töpel
Reviewed-by: Pu Lehui
Tested-by: Björn Töpel # QEMU/RVA23
Tested-by: Peilin Ye
Signed-off-by: Andrea Parri
[yepeilin@google.com: cosmetic change to commit title]
Signed-off-by: Peilin Ye
Link: https://lore.kernel.org/r/fce89473a5748e1631d18a5917d953460d1ae0d0.1746588351.git.yepeilin@google.com
Signed-off-by: Alexei Starovoitov
---
 arch/riscv/net/bpf_jit_comp64.c | 242 ++++++++++++++++++++++++----------------
 1 file changed, 143 insertions(+), 99 deletions(-)
(limited to 'arch')

diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index ca60db75199d..953b6a20c69f 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -473,6 +473,140 @@ static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx)
 	emit(hash, ctx);
 }
 
+static int emit_load_8(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+	int insns_start;
+
+	if (is_12b_int(off)) {
+		insns_start = ctx->ninsns;
+		if (sign_ext)
+			emit(rv_lb(rd, off, rs), ctx);
+		else
+			emit(rv_lbu(rd, off, rs), ctx);
+		return ctx->ninsns - insns_start;
+	}
+
+	emit_imm(RV_REG_T1, off, ctx);
+	emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+	insns_start = ctx->ninsns;
+	if (sign_ext)
+		emit(rv_lb(rd, 0, RV_REG_T1), ctx);
+	else
+		emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+	return ctx->ninsns - insns_start;
+}
+
+static int emit_load_16(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+	int insns_start;
+
+	if (is_12b_int(off)) {
+		insns_start = ctx->ninsns;
+		if (sign_ext)
+			emit(rv_lh(rd, off, rs), ctx);
+		else
+			emit(rv_lhu(rd, off, rs), ctx);
+		return ctx->ninsns - insns_start;
+	}
+
+	emit_imm(RV_REG_T1, off, ctx);
+	emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+	insns_start = ctx->ninsns;
+	if (sign_ext)
+		emit(rv_lh(rd, 0, RV_REG_T1), ctx);
+	else
+		emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+	return ctx->ninsns - insns_start;
+}
+
+static int emit_load_32(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+	int insns_start;
+
+	if (is_12b_int(off)) {
+		insns_start = ctx->ninsns;
+		if (sign_ext)
+			emit(rv_lw(rd, off, rs), ctx);
+		else
+			emit(rv_lwu(rd, off, rs), ctx);
+		return ctx->ninsns - insns_start;
+	}
+
+	emit_imm(RV_REG_T1, off, ctx);
+	emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+	insns_start = ctx->ninsns;
+	if (sign_ext)
+		emit(rv_lw(rd, 0, RV_REG_T1), ctx);
+	else
+		emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+	return ctx->ninsns - insns_start;
+}
+
+static int emit_load_64(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+	int insns_start;
+
+	if (is_12b_int(off)) {
+		insns_start = ctx->ninsns;
+		emit_ld(rd, off, rs, ctx);
+		return ctx->ninsns - insns_start;
+	}
+
+	emit_imm(RV_REG_T1, off, ctx);
+	emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+	insns_start = ctx->ninsns;
+	emit_ld(rd, 0, RV_REG_T1, ctx);
+	return ctx->ninsns - insns_start;
+}
+
+static void emit_store_8(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
+{
+	if (is_12b_int(off)) {
+		emit(rv_sb(rd, off, rs), ctx);
+		return;
+	}
+
+	emit_imm(RV_REG_T1, off, ctx);
+	emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx); +} + +static void emit_store_16(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit(rv_sh(rd, off, rs), ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit(rv_sh(RV_REG_T1, 0, rs), ctx); +} + +static void emit_store_32(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit_sw(rd, off, rs, ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sw(RV_REG_T1, 0, rs, ctx); +} + +static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit_sd(rd, off, rs, ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sd(RV_REG_T1, 0, rs, ctx); +} + static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, struct rv_jit_context *ctx) { @@ -1650,8 +1784,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: { - int insn_len, insns_start; bool sign_ext; + int insn_len; sign_ext = BPF_MODE(insn->code) == BPF_MEMSX || BPF_MODE(insn->code) == BPF_PROBE_MEMSX; @@ -1663,78 +1797,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, switch (BPF_SIZE(code)) { case BPF_B: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lb(rd, off, rs), ctx); - else - emit(rv_lbu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lb(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lbu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_8(sign_ext, rd, off, rs, ctx); break; case BPF_H: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lh(rd, off, rs), ctx); - else - emit(rv_lhu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lh(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lhu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_16(sign_ext, rd, off, rs, ctx); break; case BPF_W: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lw(rd, off, rs), ctx); - else - emit(rv_lwu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lw(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lwu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_32(sign_ext, rd, off, rs, ctx); break; case BPF_DW: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - emit_ld(rd, off, rs, ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - emit_ld(rd, 0, RV_REG_T1, ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_64(sign_ext, rd, off, rs, ctx); break; } @@ -1879,44 +1951,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* STX: *(size *)(dst + off) = src */ case BPF_STX | BPF_MEM | BPF_B: - if (is_12b_int(off)) { - 
emit(rv_sb(rd, off, rs), ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
-		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+		emit_store_8(rd, off, rs, ctx);
 		break;
 	case BPF_STX | BPF_MEM | BPF_H:
-		if (is_12b_int(off)) {
-			emit(rv_sh(rd, off, rs), ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
-		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+		emit_store_16(rd, off, rs, ctx);
 		break;
 	case BPF_STX | BPF_MEM | BPF_W:
-		if (is_12b_int(off)) {
-			emit_sw(rd, off, rs, ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
-		emit_sw(RV_REG_T1, 0, rs, ctx);
+		emit_store_32(rd, off, rs, ctx);
 		break;
 	case BPF_STX | BPF_MEM | BPF_DW:
-		if (is_12b_int(off)) {
-			emit_sd(rd, off, rs, ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
-		emit_sd(RV_REG_T1, 0, rs, ctx);
+		emit_store_64(rd, off, rs, ctx);
 		break;
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
--
cgit v1.2.3


From 8afd3170d5116385740aef8ab77d10b83f9b8e60 Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Wed, 7 May 2025 03:43:01 +0000
Subject: bpf, riscv64: Support load-acquire and store-release instructions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Support BPF load-acquire (BPF_LOAD_ACQ) and store-release
(BPF_STORE_REL) instructions in the riscv64 JIT compiler. For example,
consider the following 64-bit load-acquire (assuming little-endian):

  db 10 00 00 00 01 00 00  r1 = load_acquire((u64 *)(r1 + 0x0))
  95 00 00 00 00 00 00 00  exit

  opcode (0xdb): BPF_ATOMIC | BPF_DW | BPF_STX
  imm (0x00000100): BPF_LOAD_ACQ

The JIT compiler will emit an LD instruction followed by a FENCE R,RW
instruction for the above, e.g.:

  ld x7,0(x6)
  fence r,rw

Similarly, consider the following 16-bit store-release:

  cb 21 00 00 10 01 00 00  store_release((u16 *)(r1 + 0x0), w2)
  95 00 00 00 00 00 00 00  exit

  opcode (0xcb): BPF_ATOMIC | BPF_H | BPF_STX
  imm (0x00000110): BPF_STORE_REL

A FENCE RW,W instruction followed by an SH instruction will be
emitted, e.g.:

  fence rw,w
  sh x2,0(x4)

8-bit and 16-bit load-acquires are zero-extending (cf. LBU, LHU). The
verifier always rejects misaligned load-acquires/store-releases (even
if BPF_F_ANY_ALIGNMENT is set), so the emitted load and store
instructions are guaranteed to be single-copy atomic.

Introduce primitives to emit the relevant fences, i.e. fences with
R -> RW, RW -> W and RW -> RW, which are also the fences most commonly
used in the kernel.

Rename emit_atomic() to emit_atomic_rmw() to make it clear that it
only handles RMW atomics, and replace its is64 parameter with the BPF
opcode so that the required checks can be performed on the operand
size (BPF_SIZE(code)).
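For reference, rv_fence(pred, succ) encodes each operand as a 4-bit
I/O/R/W set (I=8, O=4, R=2, W=1), so the fence helpers introduced
below expand as follows:

  rv_fence(0x2, 0x3)   /* fence r,rw  - tail of a load-acquire  */
  rv_fence(0x3, 0x1)   /* fence rw,w  - head of a store-release */
  rv_fence(0x3, 0x3)   /* fence rw,rw - full barrier (RMW path) */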
Acked-by: Björn Töpel Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Andrea Parri Co-developed-by: Peilin Ye Signed-off-by: Peilin Ye Reviewed-by: Pu Lehui Link: https://lore.kernel.org/r/3059c560e537ad43ed19055d2ebbd970c698095a.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit.h | 15 +++++++++ arch/riscv/net/bpf_jit_comp64.c | 75 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 85 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index 1d1c78d4cff1..e7b032dfd17f 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -608,6 +608,21 @@ static inline u32 rv_fence(u8 pred, u8 succ) return rv_i_insn(imm11_0, 0, 0, 0, 0xf); } +static inline void emit_fence_r_rw(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x2, 0x3), ctx); +} + +static inline void emit_fence_rw_w(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x3, 0x1), ctx); +} + +static inline void emit_fence_rw_rw(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x3, 0x3), ctx); +} + static inline u32 rv_nop(void) { return rv_i_insn(0, 0, 0, 0, 0x13); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 953b6a20c69f..8767f032f2de 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -607,11 +607,65 @@ static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) emit_sd(RV_REG_T1, 0, rs, ctx); } -static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, - struct rv_jit_context *ctx) +static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_jit_context *ctx) +{ + switch (imm) { + /* dst_reg = load_acquire(src_reg + off16) */ + case BPF_LOAD_ACQ: + switch (BPF_SIZE(code)) { + case BPF_B: + emit_load_8(false, rd, off, rs, ctx); + break; + case BPF_H: + emit_load_16(false, rd, off, rs, ctx); + break; + case BPF_W: + emit_load_32(false, rd, off, rs, ctx); + break; + case BPF_DW: + emit_load_64(false, rd, off, rs, ctx); + break; + } + emit_fence_r_rw(ctx); + break; + /* store_release(dst_reg + off16, src_reg) */ + case BPF_STORE_REL: + emit_fence_rw_w(ctx); + switch (BPF_SIZE(code)) { + case BPF_B: + emit_store_8(rd, off, rs, ctx); + break; + case BPF_H: + emit_store_16(rd, off, rs, ctx); + break; + case BPF_W: + emit_store_32(rd, off, rs, ctx); + break; + case BPF_DW: + emit_store_64(rd, off, rs, ctx); + break; + } + break; + default: + pr_err_once("bpf-jit: invalid atomic load/store opcode %02x\n", imm); + return -EINVAL; + } + + return 0; +} + +static int emit_atomic_rmw(u8 rd, u8 rs, s16 off, s32 imm, u8 code, + struct rv_jit_context *ctx) { u8 r0; int jmp_offset; + bool is64; + + if (BPF_SIZE(code) != BPF_W && BPF_SIZE(code) != BPF_DW) { + pr_err_once("bpf-jit: 1- and 2-byte RMW atomics are not supported\n"); + return -EINVAL; + } + is64 = BPF_SIZE(code) == BPF_DW; if (off) { if (is_12b_int(off)) { @@ -688,9 +742,14 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx); jmp_offset = ninsns_rvoff(-6); emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx); - emit(rv_fence(0x3, 0x3), ctx); + emit_fence_rw_rw(ctx); break; + default: + pr_err_once("bpf-jit: invalid atomic RMW opcode %02x\n", imm); + return -EINVAL; } + + return 0; } #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) @@ -1962,10 +2021,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_STX | BPF_MEM | BPF_DW: emit_store_64(rd, off, rs, ctx); break; + 
case BPF_STX | BPF_ATOMIC | BPF_B: + case BPF_STX | BPF_ATOMIC | BPF_H: case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: - emit_atomic(rd, rs, off, imm, - BPF_SIZE(code) == BPF_DW, ctx); + if (bpf_atomic_is_load_store(insn)) + ret = emit_atomic_ld_st(rd, rs, off, imm, code, ctx); + else + ret = emit_atomic_rmw(rd, rs, off, imm, code, ctx); + if (ret) + return ret; break; case BPF_STX | BPF_PROBE_MEM32 | BPF_B: -- cgit v1.2.3 From db7a3822b5f474b09db0a776e91f17c8b7d32137 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:07 +0000 Subject: bpf, riscv64: Skip redundant zext instruction after load-acquire MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the verifier inserts a zext instruction right after every 8-, 16- or 32-bit load-acquire, which is already zero-extending. Skip such redundant zext instructions. While we are here, update that already-obsolete comment about "skip the next instruction" in build_body(). Also change emit_atomic_rmw()'s parameters to keep it consistent with emit_atomic_ld_st(). Note that checking 'insn[1]' relies on 'insn' not being the last instruction, which should have been guaranteed by the verifier; we already use 'insn[1]' elsewhere in the file for similar purposes. Additionally, we don't check if 'insn[1]' is actually a zext for our load-acquire's dst_reg, or some other registers - in other words, here we are relying on the verifier to always insert a redundant zext right after a 8/16/32-bit load-acquire, for its dst_reg. Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/10e90e0eab042f924d35ad0d1c1f7ca29f673152.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit_comp64.c | 23 ++++++++++++++++++----- arch/riscv/net/bpf_jit_core.c | 3 +-- 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 8767f032f2de..10e01ff06312 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -607,8 +607,13 @@ static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) emit_sd(RV_REG_T1, 0, rs, ctx); } -static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_jit_context *ctx) +static int emit_atomic_ld_st(u8 rd, u8 rs, const struct bpf_insn *insn, + struct rv_jit_context *ctx) { + u8 code = insn->code; + s32 imm = insn->imm; + s16 off = insn->off; + switch (imm) { /* dst_reg = load_acquire(src_reg + off16) */ case BPF_LOAD_ACQ: @@ -627,6 +632,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_ break; } emit_fence_r_rw(ctx); + + /* If our next insn is a redundant zext, return 1 to tell + * build_body() to skip it. 
+ */ + if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1])) + return 1; break; /* store_release(dst_reg + off16, src_reg) */ case BPF_STORE_REL: @@ -654,10 +665,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_ return 0; } -static int emit_atomic_rmw(u8 rd, u8 rs, s16 off, s32 imm, u8 code, +static int emit_atomic_rmw(u8 rd, u8 rs, const struct bpf_insn *insn, struct rv_jit_context *ctx) { - u8 r0; + u8 r0, code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; int jmp_offset; bool is64; @@ -2026,9 +2039,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: if (bpf_atomic_is_load_store(insn)) - ret = emit_atomic_ld_st(rd, rs, off, imm, code, ctx); + ret = emit_atomic_ld_st(rd, rs, insn, ctx); else - ret = emit_atomic_rmw(rd, rs, off, imm, code, ctx); + ret = emit_atomic_rmw(rd, rs, insn, ctx); if (ret) return ret; break; diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index f8cd2f70a7fb..f6ca5cfa6b2f 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -26,9 +26,8 @@ static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset) int ret; ret = bpf_jit_emit_insn(insn, ctx, extra_pass); - /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */ if (ret > 0) - i++; + i++; /* skip the next instruction */ if (offset) offset[i] = ctx->ninsns; if (ret < 0) -- cgit v1.2.3 From 5f55f2168432298f5a55294831ab6a76a10cb3c3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 12 May 2025 14:26:15 +0200 Subject: s390/bpf: Store backchain even for leaf progs Currently a crash in a leaf prog (caused by a bug) produces the following call trace: [<000003ff600ebf00>] bpf_prog_6df0139e1fbf2789_fentry+0x20/0x78 [<0000000000000000>] 0x0 This is because leaf progs do not store backchain. Fix by making all progs do it. This is what GCC and Clang-generated code does as well. 
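As an illustration only (not part of this patch), a backchain-based
unwinder consumes these slots roughly as follows, where offset 152
matches the slot written by the JIT prologue below and report_frame()
is a hypothetical helper; the real unwinder performs many more sanity
checks:

  static void walk_backchain(unsigned long sp)
  {
  	while (sp) {
  		report_frame(sp);			/* hypothetical */
  		sp = *(unsigned long *)(sp + 152);	/* caller's %r15 */
  	}
  }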
Now the call trace looks like this: [<000003ff600eb0f2>] bpf_prog_6df0139e1fbf2789_fentry+0x2a/0x80 [<000003ff600ed096>] bpf_trampoline_201863462940+0x96/0xf4 [<000003ff600e3a40>] bpf_prog_05f379658fdd72f2_classifier_0+0x58/0xc0 [<000003ffe0aef070>] bpf_test_run+0x210/0x390 [<000003ffe0af0dc2>] bpf_prog_test_run_skb+0x25a/0x668 [<000003ffe038a90e>] __sys_bpf+0xa46/0xdb0 [<000003ffe038ad0c>] __s390x_sys_bpf+0x44/0x50 [<000003ffe0defea8>] __do_syscall+0x150/0x280 [<000003ffe0e01d5c>] system_call+0x74/0x98 Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend") Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250512122717.54878-1-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0776dfde2dba..945106b5562d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -605,17 +605,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* Setup stack and backchain */ if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* lgr %w1,%r15 (backchain) */ - EMIT4(0xb9040000, REG_W1, REG_15); + /* lgr %w1,%r15 (backchain) */ + EMIT4(0xb9040000, REG_W1, REG_15); /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* stg %w1,152(%r15) (backchain) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, - REG_15, 152); + /* stg %w1,152(%r15) (backchain) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, + REG_15, 152); } } -- cgit v1.2.3 From 8e57cf09c84cac99eb31354a3cc70f8b8981bfc2 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 12 May 2025 22:57:31 +0200 Subject: s390/bpf: Remove the orig_call NULL check Now that orig_call can never be NULL, remove the respective check. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250512221911.61314-3-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 945106b5562d..6f22b5199c20 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -2583,9 +2583,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, if (nr_stack_args > MAX_NR_STACK_ARGS) return -ENOTSUPP; - /* Return to %r14, since func_addr and %r0 are not available. */ - if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) || - (flags & BPF_TRAMP_F_INDIRECT)) + /* Return to %r14 in the struct_ops case. */ + if (flags & BPF_TRAMP_F_INDIRECT) flags |= BPF_TRAMP_F_SKIP_FRAME; /* -- cgit v1.2.3 From f7562001c8b854390899b53d06ba4202c89339e6 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:04 +0100 Subject: s390: always declare expoline thunks It would be convenient to use the following pattern in the BPF JIT: if (nospec_uses_trampoline()) emit_call(__s390_indirect_jump_r1); Unfortunately with CONFIG_EXPOLINE=n the compiler complains about the missing prototype of __s390_indirect_jump_r1(). One could wrap the whole "if" statement in an #ifdef, but this clutters the code. 
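Wrapped, every such call site would look roughly like this (sketch
only, reusing the emit_call() example from above):

  #ifdef CONFIG_EXPOLINE
  	if (nospec_uses_trampoline())
  		emit_call(__s390_indirect_jump_r1);
  #endif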
Instead, declare expoline thunk prototypes even when compiling without expolines. When using the above code structure and compiling without expolines, references to them are optimized away, and there are no linker errors. Acked-by: Heiko Carstens Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-2-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/include/asm/nospec-branch.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h index 192835a3e24d..c7c96282f011 100644 --- a/arch/s390/include/asm/nospec-branch.h +++ b/arch/s390/include/asm/nospec-branch.h @@ -26,8 +26,6 @@ static inline bool nospec_uses_trampoline(void) return __is_defined(CC_USING_EXPOLINE) && !nospec_disable; } -#ifdef CONFIG_EXPOLINE_EXTERN - void __s390_indirect_jump_r1(void); void __s390_indirect_jump_r2(void); void __s390_indirect_jump_r3(void); @@ -44,8 +42,6 @@ void __s390_indirect_jump_r13(void); void __s390_indirect_jump_r14(void); void __s390_indirect_jump_r15(void); -#endif - #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_EXPOLINE_H */ -- cgit v1.2.3 From 9053ba042fc7c0e718566932288cc88b3bb2dbe1 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:05 +0100 Subject: s390/bpf: Add macros for calling external functions After the V!=R rework (commit c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")), kernel and BPF programs are allocated within a 4G region, making it possible to use relative addressing to directly use kernel functions from BPF code. Add two new macros for calling kernel functions from BPF code: EMIT6_PCREL_RILB_PTR() and EMIT6_PCREL_RILC_PTR(). Factor out parts of the existing macros that are helpful for implementing the new ones. 
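For reference, the arithmetic behind the new *_PTR macros (mirroring
ptr_to_pcrel() and emit6_pcrel_ril() in the diff below): RIL-format
instructions carry a signed 32-bit pc-relative offset counted in
halfwords, giving a reach of about +-4 GiB, which covers the 4G
region shared by kernel and BPF code:

  s64 pcrel   = (const u8 *)target - ((const u8 *)jit->prg_buf + jit->prg);
  u32 pc32dbl = (s32)(pcrel / 2);	/* halfword units, as encoded */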
Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-3-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 60 +++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6f22b5199c20..70a93e03bfb3 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -127,6 +127,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) jit->seen_regs |= (1 << r1); } +static s32 off_to_pcrel(struct bpf_jit *jit, u32 off) +{ + return off - jit->prg; +} + +static s64 ptr_to_pcrel(struct bpf_jit *jit, const void *ptr) +{ + if (jit->prg_buf) + return (const u8 *)ptr - ((const u8 *)jit->prg_buf + jit->prg); + return 0; +} + #define REG_SET_SEEN(b1) \ ({ \ reg_set_seen(jit, b1); \ @@ -201,7 +213,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT4_PCREL_RIC(op, mask, target) \ ({ \ - int __rel = ((target) - jit->prg) / 2; \ + int __rel = off_to_pcrel(jit, target) / 2; \ _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ }) @@ -239,7 +251,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ ({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ + unsigned int rel = off_to_pcrel(jit, target) / 2; \ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ (op2) | (mask) << 12); \ REG_SET_SEEN(b1); \ @@ -248,7 +260,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ ({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ + unsigned int rel = off_to_pcrel(jit, target) / 2; \ _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ REG_SET_SEEN(b1); \ @@ -257,29 +269,41 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ - int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \ + int rel = off_to_pcrel(jit, addrs[(i) + (off) + 1]) / 2;\ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) +static void emit6_pcrel_ril(struct bpf_jit *jit, u32 op, s64 pcrel) +{ + u32 pc32dbl = (s32)(pcrel / 2); + + _EMIT6(op | pc32dbl >> 16, pc32dbl & 0xffff); +} + +static void emit6_pcrel_rilb(struct bpf_jit *jit, u32 op, u8 b, s64 pcrel) +{ + emit6_pcrel_ril(jit, op | reg_high(b) << 16, pcrel); + REG_SET_SEEN(b); +} + #define EMIT6_PCREL_RILB(op, b, target) \ -({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ - _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\ - REG_SET_SEEN(b); \ -}) + emit6_pcrel_rilb(jit, op, b, off_to_pcrel(jit, target)) -#define EMIT6_PCREL_RIL(op, target) \ -({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ - _EMIT6((op) | rel >> 16, rel & 0xffff); \ -}) +#define EMIT6_PCREL_RILB_PTR(op, b, target_ptr) \ + emit6_pcrel_rilb(jit, op, b, ptr_to_pcrel(jit, target_ptr)) + +static void emit6_pcrel_rilc(struct bpf_jit *jit, u32 op, u8 mask, s64 pcrel) +{ + emit6_pcrel_ril(jit, op | mask << 20, pcrel); +} #define EMIT6_PCREL_RILC(op, mask, target) \ -({ \ - EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \ -}) + emit6_pcrel_rilc(jit, op, mask, off_to_pcrel(jit, target)) + +#define EMIT6_PCREL_RILC_PTR(op, mask, target_ptr) \ + emit6_pcrel_rilc(jit, op, mask, ptr_to_pcrel(jit, target_ptr)) #define _EMIT6_IMM(op, imm) \ ({ 
\ @@ -503,7 +527,7 @@ static void bpf_skip(struct bpf_jit *jit, int size) { if (size >= 6 && !is_valid_rel(size)) { /* brcl 0xf,size */ - EMIT6_PCREL_RIL(0xc0f4000000, size); + EMIT6_PCREL_RILC(0xc0040000, 0xf, size); size -= 6; } else if (size >= 4 && is_valid_rel(size)) { /* brc 0xf,size */ -- cgit v1.2.3 From 7f332f9fe9d854c1680fea223b6cb04530859a1a Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:06 +0100 Subject: s390/bpf: Use kernel's expoline thunks Simplify the JIT code by replacing the custom expolines with the ones defined in the kernel text. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-4-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 61 ++++++++++++-------------------------------- 1 file changed, 17 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 70a93e03bfb3..c7f8313ba449 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -48,8 +48,6 @@ struct bpf_jit { int lit64; /* Current position in 64-bit literal pool */ int base_ip; /* Base address for literal pool */ int exit_ip; /* Address of exit */ - int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ - int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ int tail_call_start; /* Tail call start offset */ int excnt; /* Number of exception table entries */ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ @@ -642,28 +640,17 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* - * Emit an expoline for a jump that follows + * Jump using a register either directly or via an expoline thunk */ -static void emit_expoline(struct bpf_jit *jit) -{ - /* exrl %r0,.+10 */ - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); - /* j . 
*/ - EMIT4_PCREL(0xa7f40000, 0); -} - -/* - * Emit __s390_indirect_jump_r1 thunk if necessary - */ -static void emit_r1_thunk(struct bpf_jit *jit) -{ - if (nospec_uses_trampoline()) { - jit->r1_thunk_ip = jit->prg; - emit_expoline(jit); - /* br %r1 */ - _EMIT2(0x07f1); - } -} +#define EMIT_JUMP_REG(reg) do { \ + if (nospec_uses_trampoline()) \ + /* brcl 0xf,__s390_indirect_jump_rN */ \ + EMIT6_PCREL_RILC_PTR(0xc0040000, 0x0f, \ + __s390_indirect_jump_r ## reg); \ + else \ + /* br %rN */ \ + _EMIT2(0x07f0 | reg); \ +} while (0) /* * Call r1 either directly or via __s390_indirect_jump_r1 thunk @@ -672,7 +659,8 @@ static void call_r1(struct bpf_jit *jit) { if (nospec_uses_trampoline()) /* brasl %r14,__s390_indirect_jump_r1 */ - EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); + EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, + __s390_indirect_jump_r1); else /* basr %r14,%r1 */ EMIT2(0x0d00, REG_14, REG_1); @@ -688,16 +676,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); - if (nospec_uses_trampoline()) { - jit->r14_thunk_ip = jit->prg; - /* Generate __s390_indirect_jump_r14 thunk */ - emit_expoline(jit); - } - /* br %r14 */ - _EMIT2(0x07fe); - - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - emit_r1_thunk(jit); + EMIT_JUMP_REG(14); jit->prg = ALIGN(jit->prg, 8); jit->prologue_plt = jit->prg; @@ -1899,7 +1878,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* aghi %r1,tail_call_start */ EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start); /* brcl 0xf,__s390_indirect_jump_r1 */ - EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip); + EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf, + __s390_indirect_jump_r1); } else { /* bc 0xf,tail_call_start(%r1) */ _EMIT4(0x47f01000 + jit->tail_call_start); @@ -2868,17 +2848,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, 0xf000 | tjit->tccnt_off); /* aghi %r15,stack_size */ EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size); - /* Emit an expoline for the following indirect jump. */ - if (nospec_uses_trampoline()) - emit_expoline(jit); if (flags & BPF_TRAMP_F_SKIP_FRAME) - /* br %r14 */ - _EMIT2(0x07fe); + EMIT_JUMP_REG(14); else - /* br %r1 */ - _EMIT2(0x07f1); - - emit_r1_thunk(jit); + EMIT_JUMP_REG(1); return 0; } -- cgit v1.2.3 From 9014cf56f13d8ce21329837c148f723490950e28 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Tue, 27 May 2025 12:06:03 +0200 Subject: bpf, arm64: Support up to 12 function arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently ARM64 bpf trampoline supports up to 8 function arguments. According to the statistics from commit 473e3150e30a ("bpf, x86: allow function arguments up to 12 for TRACING"), there are about 200 functions accept 9 to 12 arguments, so adding support for up to 12 function arguments. Due to bpf only supporting function arguments up to 16 bytes, according to AAPCS64, starting from the first argument, each argument is first attempted to be loaded to 1 or 2 smallest registers from x0-x7, if there are no enough registers to hold the entire argument, then all remaining arguments starting from this one are pushed to the stack for passing. There are some non-trivial cases for which it is not possible to correctly read arguments from/write arguments to the stack: for example struct variables may have custom packing/alignment attributes that are invisible in BTF info. 
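For instance, a contrived example of the kind meant here (not taken
from the kernel):

  struct opts {
  	u8  a;
  	u64 b;
  } __attribute__((packed));

BTF describes the member types, but not the attribute, so the exact
layout the compiler uses when such an argument is passed on the stack
cannot be reconstructed with certainty.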
Such cases are denied for now to make sure not to read incorrect values. Signed-off-by: Xu Kuohai Co-developed-by: Alexis Lothoré (eBPF Foundation) Signed-off-by: Alexis Lothoré (eBPF Foundation) Link: https://lore.kernel.org/r/20250527-many_args_arm64-v3-1-3faf7bb8e4a2@bootlin.com Signed-off-by: Alexei Starovoitov --- arch/arm64/net/bpf_jit_comp.c | 225 ++++++++++++++++++++++++++++++++---------- 1 file changed, 171 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 70d7c89d3ac9..b5c3ab623536 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2064,7 +2064,7 @@ bool bpf_jit_supports_subprog_tailcalls(void) } static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, - int args_off, int retval_off, int run_ctx_off, + int bargs_off, int retval_off, int run_ctx_off, bool save_ret) { __le32 *branch; @@ -2106,7 +2106,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, branch = ctx->image + ctx->idx; emit(A64_NOP, ctx); - emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx); + emit(A64_ADD_I(1, A64_R(0), A64_SP, bargs_off), ctx); if (!p->jited) emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx); @@ -2131,7 +2131,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, } static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, - int args_off, int retval_off, int run_ctx_off, + int bargs_off, int retval_off, int run_ctx_off, __le32 **branches) { int i; @@ -2141,7 +2141,7 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, */ emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx); for (i = 0; i < tl->nr_links; i++) { - invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, + invoke_bpf_prog(ctx, tl->links[i], bargs_off, retval_off, run_ctx_off, true); /* if (*(u64 *)(sp + retval_off) != 0) * goto do_fexit; @@ -2155,23 +2155,125 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, } } -static void save_args(struct jit_ctx *ctx, int args_off, int nregs) +struct arg_aux { + /* how many args are passed through registers, the rest of the args are + * passed through stack + */ + int args_in_regs; + /* how many registers are used to pass arguments */ + int regs_for_args; + /* how much stack is used for additional args passed to bpf program + * that did not fit in original function registers + */ + int bstack_for_args; + /* home much stack is used for additional args passed to the + * original function when called from trampoline (this one needs + * arguments to be properly aligned) + */ + int ostack_for_args; +}; + +static int calc_arg_aux(const struct btf_func_model *m, + struct arg_aux *a) { - int i; + int stack_slots, nregs, slots, i; + + /* verifier ensures m->nr_args <= MAX_BPF_FUNC_ARGS */ + for (i = 0, nregs = 0; i < m->nr_args; i++) { + slots = (m->arg_size[i] + 7) / 8; + if (nregs + slots <= 8) /* passed through register ? 
*/ + nregs += slots; + else + break; + } + + a->args_in_regs = i; + a->regs_for_args = nregs; + a->ostack_for_args = 0; + a->bstack_for_args = 0; - for (i = 0; i < nregs; i++) { - emit(A64_STR64I(i, A64_SP, args_off), ctx); - args_off += 8; + /* the rest arguments are passed through stack */ + for (; i < m->nr_args; i++) { + /* We can not know for sure about exact alignment needs for + * struct passed on stack, so deny those + */ + if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) + return -ENOTSUPP; + stack_slots = (m->arg_size[i] + 7) / 8; + a->bstack_for_args += stack_slots * 8; + a->ostack_for_args = a->ostack_for_args + stack_slots * 8; + } + + return 0; +} + +static void clear_garbage(struct jit_ctx *ctx, int reg, int effective_bytes) +{ + if (effective_bytes) { + int garbage_bits = 64 - 8 * effective_bytes; +#ifdef CONFIG_CPU_BIG_ENDIAN + /* garbage bits are at the right end */ + emit(A64_LSR(1, reg, reg, garbage_bits), ctx); + emit(A64_LSL(1, reg, reg, garbage_bits), ctx); +#else + /* garbage bits are at the left end */ + emit(A64_LSL(1, reg, reg, garbage_bits), ctx); + emit(A64_LSR(1, reg, reg, garbage_bits), ctx); +#endif } } -static void restore_args(struct jit_ctx *ctx, int args_off, int nregs) +static void save_args(struct jit_ctx *ctx, int bargs_off, int oargs_off, + const struct btf_func_model *m, + const struct arg_aux *a, + bool for_call_origin) { int i; + int reg; + int doff; + int soff; + int slots; + u8 tmp = bpf2a64[TMP_REG_1]; + + /* store arguments to the stack for the bpf program, or restore + * arguments from stack for the original function + */ + for (reg = 0; reg < a->regs_for_args; reg++) { + emit(for_call_origin ? + A64_LDR64I(reg, A64_SP, bargs_off) : + A64_STR64I(reg, A64_SP, bargs_off), + ctx); + bargs_off += 8; + } - for (i = 0; i < nregs; i++) { - emit(A64_LDR64I(i, A64_SP, args_off), ctx); - args_off += 8; + soff = 32; /* on stack arguments start from FP + 32 */ + doff = (for_call_origin ? oargs_off : bargs_off); + + /* save on stack arguments */ + for (i = a->args_in_regs; i < m->nr_args; i++) { + slots = (m->arg_size[i] + 7) / 8; + /* verifier ensures arg_size <= 16, so slots equals 1 or 2 */ + while (slots-- > 0) { + emit(A64_LDR64I(tmp, A64_FP, soff), ctx); + /* if there is unused space in the last slot, clear + * the garbage contained in the space. 
+ */ + if (slots == 0 && !for_call_origin) + clear_garbage(ctx, tmp, m->arg_size[i] % 8); + emit(A64_STR64I(tmp, A64_SP, doff), ctx); + soff += 8; + doff += 8; + } + } +} + +static void restore_args(struct jit_ctx *ctx, int bargs_off, int nregs) +{ + int reg; + + for (reg = 0; reg < nregs; reg++) { + emit(A64_LDR64I(reg, A64_SP, bargs_off), ctx); + bargs_off += 8; } } @@ -2194,17 +2296,21 @@ static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links) */ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, struct bpf_tramp_links *tlinks, void *func_addr, - int nregs, u32 flags) + const struct btf_func_model *m, + const struct arg_aux *a, + u32 flags) { int i; int stack_size; int retaddr_off; int regs_off; int retval_off; - int args_off; - int nregs_off; + int bargs_off; + int nfuncargs_off; int ip_off; int run_ctx_off; + int oargs_off; + int nfuncargs; struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -2213,31 +2319,38 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, bool is_struct_ops = is_struct_ops_tramp(fentry); /* trampoline stack layout: - * [ parent ip ] - * [ FP ] - * SP + retaddr_off [ self ip ] - * [ FP ] + * [ parent ip ] + * [ FP ] + * SP + retaddr_off [ self ip ] + * [ FP ] * - * [ padding ] align SP to multiples of 16 + * [ padding ] align SP to multiples of 16 * - * [ x20 ] callee saved reg x20 - * SP + regs_off [ x19 ] callee saved reg x19 + * [ x20 ] callee saved reg x20 + * SP + regs_off [ x19 ] callee saved reg x19 * - * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or - * BPF_TRAMP_F_RET_FENTRY_RET + * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or + * BPF_TRAMP_F_RET_FENTRY_RET + * [ arg reg N ] + * [ ... ] + * SP + bargs_off [ arg reg 1 ] for bpf * - * [ arg reg N ] - * [ ... ] - * SP + args_off [ arg reg 1 ] + * SP + nfuncargs_off [ arg regs count ] * - * SP + nregs_off [ arg regs count ] + * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag * - * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag + * SP + run_ctx_off [ bpf_tramp_run_ctx ] * - * SP + run_ctx_off [ bpf_tramp_run_ctx ] + * [ stack arg N ] + * [ ... 
] + * SP + oargs_off [ stack arg 1 ] for original func */ stack_size = 0; + oargs_off = stack_size; + if (flags & BPF_TRAMP_F_CALL_ORIG) + stack_size += a->ostack_for_args; + run_ctx_off = stack_size; /* room for bpf_tramp_run_ctx */ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8); @@ -2247,13 +2360,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_IP_ARG) stack_size += 8; - nregs_off = stack_size; + nfuncargs_off = stack_size; /* room for args count */ stack_size += 8; - args_off = stack_size; + bargs_off = stack_size; /* room for args */ - stack_size += nregs * 8; + nfuncargs = a->regs_for_args + a->bstack_for_args / 8; + stack_size += 8 * nfuncargs; /* room for return value */ retval_off = stack_size; @@ -2300,11 +2414,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } /* save arg regs count*/ - emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx); - emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx); + emit(A64_MOVZ(1, A64_R(10), nfuncargs, 0), ctx); + emit(A64_STR64I(A64_R(10), A64_SP, nfuncargs_off), ctx); - /* save arg regs */ - save_args(ctx, args_off, nregs); + /* save args for bpf */ + save_args(ctx, bargs_off, oargs_off, m, a, false); /* save callee saved registers */ emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx); @@ -2320,7 +2434,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fentry->nr_links; i++) - invoke_bpf_prog(ctx, fentry->links[i], args_off, + invoke_bpf_prog(ctx, fentry->links[i], bargs_off, retval_off, run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET); @@ -2330,12 +2444,13 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (!branches) return -ENOMEM; - invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, + invoke_bpf_mod_ret(ctx, fmod_ret, bargs_off, retval_off, run_ctx_off, branches); } if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_args(ctx, args_off, nregs); + /* save args for original func */ + save_args(ctx, bargs_off, oargs_off, m, a, true); /* call original func */ emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx); emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx); @@ -2354,7 +2469,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fexit->nr_links; i++) - invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, + invoke_bpf_prog(ctx, fexit->links[i], bargs_off, retval_off, run_ctx_off, false); if (flags & BPF_TRAMP_F_CALL_ORIG) { @@ -2368,7 +2483,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_args(ctx, args_off, nregs); + restore_args(ctx, bargs_off, a->regs_for_args); /* restore callee saved register x19 and x20 */ emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx); @@ -2428,14 +2543,16 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, .idx = 0, }; struct bpf_tramp_image im; + struct arg_aux aaux; int nregs, ret; nregs = btf_func_model_nregs(m); - /* the first 8 registers are used for arguments */ - if (nregs > 8) - return -ENOTSUPP; - ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags); + ret = calc_arg_aux(m, &aaux); + if (ret < 0) + return ret; + + ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, m, &aaux, flags); if (ret < 0) return ret; @@ -2462,9 +2579,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, u32 flags, struct bpf_tramp_links *tlinks, 
void *func_addr) { - int ret, nregs; - void *image, *tmp; u32 size = ro_image_end - ro_image; + struct arg_aux aaux; + void *image, *tmp; + int ret; /* image doesn't need to be in module memory range, so we can * use kvmalloc. @@ -2480,13 +2598,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, .write = true, }; - nregs = btf_func_model_nregs(m); - /* the first 8 registers are used for arguments */ - if (nregs > 8) - return -ENOTSUPP; jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); - ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags); + ret = calc_arg_aux(m, &aaux); + if (ret) + goto out; + ret = prepare_trampoline(&ctx, im, tlinks, func_addr, m, &aaux, flags); if (ret > 0 && validate_code(&ctx) < 0) { ret = -EINVAL; -- cgit v1.2.3 From c5cebb241e27ed0c3f4c1d2ce63089398e0ed17e Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 27 May 2025 17:27:04 -0700 Subject: bpf, arm64: Remove unused-but-set function and variable. Remove unused-but-set function and variable to fix the build warning: arch/arm64/net/bpf_jit_comp.c: In function 'arch_bpf_trampoline_size': 2547 | int nregs, ret; | ^~~~~ Fixes: 9014cf56f13d ("bpf, arm64: Support up to 12 function arguments") Reported-by: kernel test robot Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: Xu Kuohai Link: https://lore.kernel.org/bpf/20250528002704.21197-1-alexei.starovoitov@gmail.com Closes: https://lore.kernel.org/oe-kbuild-all/202505280643.h0qYcSCM-lkp@intel.com/ --- arch/arm64/net/bpf_jit_comp.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b5c3ab623536..14d4c6ac4ca0 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2520,21 +2520,6 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, return ctx->idx; } -static int btf_func_model_nregs(const struct btf_func_model *m) -{ - int nregs = m->nr_args; - int i; - - /* extra registers needed for struct argument */ - for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) { - /* The arg_size is at most 16 bytes, enforced by the verifier. */ - if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) - nregs += (m->arg_size[i] + 7) / 8 - 1; - } - - return nregs; -} - int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { @@ -2543,10 +2528,8 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, .idx = 0, }; struct bpf_tramp_image im; - struct arg_aux aaux; - int nregs, ret; - - nregs = btf_func_model_nregs(m); + struct arg_aux aaux; + int ret; ret = calc_arg_aux(m, &aaux); if (ret < 0) -- cgit v1.2.3