author	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-17 15:58:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-17 15:58:22 -0700
commit	eb0d6d97c27c29cd7392c8fd74f46edf7dff7ec2 (patch)
tree	faec73a955172291535f227e5f20119292c1ca1c /arch/arm64
parent	12bffaef28820e0b94c644c75708195c61af78f7 (diff)
parent	e1d486445af3c392628532229f7ce5f5cf7891b6 (diff)
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:
 "Most of the diff stat comes from Xu Kuohai's fix to emit ENDBR/BTI,
  since all JITs had to be touched to move constant blinding out and
  pass bpf_verifier_env in.

  - Fix use-after-free in arena_vm_close on fork (Alexei Starovoitov)

  - Dissociate struct_ops program with map if map_update fails (Amery Hung)

  - Fix out-of-range and off-by-one bugs in arm64 JIT (Daniel Borkmann)

  - Fix precedence bug in convert_bpf_ld_abs alignment check (Daniel Borkmann)

  - Fix arg tracking for imprecise/multi-offset in BPF_ST/STX insns (Eduard Zingerman)

  - Copy token from main to subprogs to fix missing kallsyms (Eduard Zingerman)

  - Prevent double close and leak of btf objects in libbpf (Jiri Olsa)

  - Fix af_unix null-ptr-deref in sockmap (Michal Luczaj)

  - Fix NULL deref in map_kptr_match_type for scalar regs (Mykyta Yatsenko)

  - Avoid unnecessary IPIs. Remove redundant bpf_flush_icache() in arm64 and riscv JITs (Puranjay Mohan)

  - Fix out of bounds access. Validate node_id in arena_alloc_pages() (Puranjay Mohan)

  - Reject BPF-to-BPF calls and callbacks in arm32 JIT (Puranjay Mohan)

  - Refactor all JITs to pass bpf_verifier_env to emit ENDBR/BTI for indirect jump targets on x86-64, arm64 JITs (Xu Kuohai)

  - Allow UTF-8 literals in bpf_bprintf_prepare() (Yihan Ding)"

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (32 commits)
  bpf, arm32: Reject BPF-to-BPF calls and callbacks in the JIT
  bpf: Dissociate struct_ops program with map if map_update fails
  bpf: Validate node_id in arena_alloc_pages()
  libbpf: Prevent double close and leak of btf objects
  selftests/bpf: cover UTF-8 trace_printk output
  bpf: allow UTF-8 literals in bpf_bprintf_prepare()
  selftests/bpf: Reject scalar store into kptr slot
  bpf: Fix NULL deref in map_kptr_match_type for scalar regs
  bpf: Fix precedence bug in convert_bpf_ld_abs alignment check
  bpf, arm64: Emit BTI for indirect jump target
  bpf, x86: Emit ENDBR for indirect jump targets
  bpf: Add helper to detect indirect jump targets
  bpf: Pass bpf_verifier_env to JIT
  bpf: Move constants blinding out of arch-specific JITs
  bpf, sockmap: Take state lock for af_unix iter
  bpf, sockmap: Fix af_unix null-ptr-deref in proto update
  selftests/bpf: Extend bpf_iter_unix to attempt deadlocking
  bpf, sockmap: Fix af_unix iter deadlock
  bpf, sockmap: Annotate af_unix sock::sk_state data-races
  selftests/bpf: verify kallsyms entries for token-loaded subprograms
  ...
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/lib/insn.c	2
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c	103
2 files changed, 33 insertions, 72 deletions
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index cc5b40917d0d..37ce75f7f1f0 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -338,6 +338,8 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
long offset;
offset = label_imm_common(pc, addr, SZ_1M);
+ if (offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
insn = aarch64_insn_get_bcond_value();
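The hunk above makes the encoder reject an unreachable target: label_imm_common() reports an out-of-range offset by returning the range itself, so a result at or beyond SZ_1M means the target does not fit in B.cond's signed 19-bit word immediate (roughly ±1 MiB of reach) and AARCH64_BREAK_FAULT must be returned instead of a silently truncated encoding. A stand-alone sketch of that reach, using a hypothetical helper name:

#include <stdbool.h>

#define SZ_1M (1024 * 1024)

/* hypothetical helper: can a PC-relative byte offset be encoded in B.cond's imm19? */
bool bcond_offset_encodable(long offset)
{
	/* imm19 counts 4-byte words and is signed: [-2^18, 2^18 - 1] words */
	return offset >= -(long)SZ_1M && offset <= (long)SZ_1M - 4 &&
	       (offset & 3) == 0;
}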
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index adf84962d579..0816c40fc7af 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -18,7 +18,6 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
-#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
@@ -35,8 +34,8 @@
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
- if ((((imm) > 0) && ((imm) >> (bits))) || \
- (((imm) < 0) && (~(imm) >> (bits)))) { \
+ if ((((imm) > 0) && ((imm) >> ((bits) - 1))) || \
+ (((imm) < 0) && (~(imm) >> ((bits) - 1)))) { \
pr_info("[%2d] imm=%d(0x%x) out of range\n", \
i, imm, imm); \
return -EINVAL; \
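The macro change above fixes an off-by-one in the signed-range test: a signed field that is `bits` wide holds [-2^(bits-1), 2^(bits-1) - 1], so the magnitude has to be checked with a shift of bits - 1; shifting by bits, as before, let values such as 2^(bits-1) slip through. A stand-alone illustration (plain C, not the kernel macro):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the fixed check: does imm fit in a signed field "bits" wide? */
static bool simm_fits(long imm, int bits)
{
	if (imm >= 0)
		return (imm >> (bits - 1)) == 0;
	return (~imm >> (bits - 1)) == 0;
}

int main(void)
{
	/* bits = 16: 32767 fits, 32768 does not; the old ">> bits" test accepted both */
	printf("%d %d\n", simm_fits(32767, 16), simm_fits(32768, 16));	/* prints "1 0" */
	return 0;
}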
@@ -1198,8 +1197,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
* >0 - successfully JITed a 16-byte eBPF instruction.
* <0 - failed to JIT.
*/
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
- bool extra_pass)
+static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn *insn,
+ struct jit_ctx *ctx, bool extra_pass)
{
const u8 code = insn->code;
u8 dst = bpf2a64[insn->dst_reg];
@@ -1224,6 +1223,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
int ret;
bool sign_extend;
+ if (bpf_insn_is_indirect_target(env, ctx->prog, i))
+ emit_bti(A64_BTI_J, ctx);
+
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
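For readers unfamiliar with BTI: on cores with branch target identification enabled, an indirect branch (BR/BLR) may only land on a BTI instruction, so the JIT plants a "bti j" landing pad in front of any eBPF instruction the verifier flags as a possible indirect-jump target, which is what the added lines do. A simplified stand-alone sketch of that emit step, with hypothetical names (not the kernel's jit_ctx API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define A64_BTI_J	0xd503249fu	/* HINT #36, "bti j" */

/* hypothetical stand-in for the image/index bookkeeping in struct jit_ctx */
struct jit_buf {
	uint32_t *image;
	size_t idx;
};

static void emit32(struct jit_buf *b, uint32_t insn)
{
	if (b->image)
		b->image[b->idx] = insn;	/* sizing pass runs with image == NULL */
	b->idx++;
}

/* plant a landing pad before the instruction body when it can be reached indirectly */
void jit_one_insn(struct jit_buf *b, bool indirect_target, uint32_t body)
{
	if (indirect_target)
		emit32(b, A64_BTI_J);
	emit32(b, body);
}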
@@ -1899,7 +1901,7 @@ emit_cond_jmp:
return 0;
}
-static int build_body(struct jit_ctx *ctx, bool extra_pass)
+static int build_body(struct bpf_verifier_env *env, struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
int i;
@@ -1918,7 +1920,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
int ret;
ctx->offset[i] = ctx->idx;
- ret = build_insn(insn, ctx, extra_pass);
+ ret = build_insn(env, insn, ctx, extra_pass);
if (ret > 0) {
i++;
ctx->offset[i] = ctx->idx;
@@ -1961,11 +1963,6 @@ static int validate_ctx(struct jit_ctx *ctx)
return 0;
}
-static inline void bpf_flush_icache(void *start, void *end)
-{
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
{
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
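Per the pull message, this flush is redundant on the ROX-image path and dropping it avoids unnecessary IPIs; the helper removed above was only a thin wrapper around flush_icache_range(). For orientation, the closest user-space analogue of the same maintenance (clean D-cache to the point of unification, invalidate I-cache for the written range) is the compiler builtin below; this is an illustration only, not what the remaining kernel path does internally:

/* user-space analogue: make freshly written JIT code visible to instruction fetch */
static inline void flush_jit_region(void *start, void *end)
{
	__builtin___clear_cache((char *)start, (char *)end);
}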
@@ -2006,17 +2003,15 @@ struct arm64_jit_data {
struct jit_ctx ctx;
};
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
{
int image_size, prog_size, extable_size, extable_align, extable_offset;
- struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
struct bpf_binary_header *ro_header = NULL;
struct arm64_jit_data *jit_data;
void __percpu *priv_stack_ptr = NULL;
bool was_classic = bpf_prog_was_classic(prog);
int priv_stack_alloc_sz;
- bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
u8 *image_ptr;
@@ -2025,26 +2020,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
int exentry_idx;
if (!prog->jit_requested)
- return orig_prog;
-
- tmp = bpf_jit_blind_constants(prog);
- /* If blinding was requested and we failed during blinding,
- * we must fall back to the interpreter.
- */
- if (IS_ERR(tmp))
- return orig_prog;
- if (tmp != prog) {
- tmp_blinded = true;
- prog = tmp;
- }
+ return prog;
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc_obj(*jit_data);
- if (!jit_data) {
- prog = orig_prog;
- goto out;
- }
+ if (!jit_data)
+ return prog;
prog->aux->jit_data = jit_data;
}
priv_stack_ptr = prog->aux->priv_stack_ptr;
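The removed lines above are the arm64 copy of the constant-blinding fallback; the series moves that step out of the arch JITs into common code, so bpf_int_jit_compile() no longer calls bpf_jit_blind_constants() itself. For background, a stand-alone sketch of what blinding does to one immediate (hypothetical helper, not the kernel's implementation): the attacker-supplied constant K is stored as K ^ rnd and re-derived at runtime with a second XOR, so K's literal bytes never appear in the executable image.

#include <stdint.h>
#include <stdlib.h>

/* hypothetical illustration of blinding a single 32-bit immediate */
struct blinded_imm {
	uint32_t masked;	/* K ^ rnd: the only literal emitted into the image */
	uint32_t mask;		/* rnd: re-applied at runtime with a second XOR */
};

struct blinded_imm blind_imm(uint32_t imm)
{
	struct blinded_imm b;

	b.mask = (uint32_t)rand();	/* the kernel draws this from a proper RNG */
	b.masked = imm ^ b.mask;
	return b;			/* at runtime: b.masked ^ b.mask == imm */
}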
@@ -2056,10 +2038,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
2 * PRIV_STACK_GUARD_SZ;
priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
- if (!priv_stack_ptr) {
- prog = orig_prog;
+ if (!priv_stack_ptr)
goto out_priv_stack;
- }
priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
prog->aux->priv_stack_ptr = priv_stack_ptr;
@@ -2079,10 +2059,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
ctx.prog = prog;
ctx.offset = kvzalloc_objs(int, prog->len + 1);
- if (ctx.offset == NULL) {
- prog = orig_prog;
+ if (ctx.offset == NULL)
goto out_off;
- }
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
@@ -2095,15 +2073,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic)) {
- prog = orig_prog;
+ if (build_prologue(&ctx, was_classic))
goto out_off;
- }
- if (build_body(&ctx, extra_pass)) {
- prog = orig_prog;
+ if (build_body(env, &ctx, extra_pass))
goto out_off;
- }
ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx, was_classic);
@@ -2121,10 +2095,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
sizeof(u64), &header, &image_ptr,
jit_fill_hole);
- if (!ro_header) {
- prog = orig_prog;
+ if (!ro_header)
goto out_off;
- }
/* Pass 2: Determine jited position and result for each instruction */
@@ -2152,10 +2124,8 @@ skip_init_ctx:
/* Dont write body instructions to memory for now */
ctx.write = false;
- if (build_body(&ctx, extra_pass)) {
- prog = orig_prog;
+ if (build_body(env, &ctx, extra_pass))
goto out_free_hdr;
- }
ctx.epilogue_offset = ctx.idx;
ctx.exentry_idx = exentry_idx;
@@ -2163,20 +2133,16 @@ skip_init_ctx:
ctx.write = true;
/* Pass 3: Adjust jump offset and write final image */
- if (build_body(&ctx, extra_pass) ||
- WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) {
- prog = orig_prog;
+ if (build_body(env, &ctx, extra_pass) ||
+ WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset))
goto out_free_hdr;
- }
build_epilogue(&ctx, was_classic);
build_plt(&ctx);
/* Extra pass to validate JITed code. */
- if (validate_ctx(&ctx)) {
- prog = orig_prog;
+ if (validate_ctx(&ctx))
goto out_free_hdr;
- }
/* update the real prog size */
prog_size = sizeof(u32) * ctx.idx;
@@ -2193,23 +2159,14 @@ skip_init_ctx:
if (extra_pass && ctx.idx > jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d > %d\n",
ctx.idx, jit_data->ctx.idx);
- prog->bpf_func = NULL;
- prog->jited = 0;
- prog->jited_len = 0;
goto out_free_hdr;
}
if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) {
- /* ro_header has been freed */
+ /* ro_header and header have been freed */
ro_header = NULL;
- prog = orig_prog;
- goto out_off;
+ header = NULL;
+ goto out_free_hdr;
}
- /*
- * The instructions have now been copied to the ROX region from
- * where they will execute. Now the data cache has to be cleaned to
- * the PoU and the I-cache has to be invalidated for the VAs.
- */
- bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx);
} else {
jit_data->ctx = ctx;
jit_data->ro_image = ro_image_ptr;
@@ -2245,13 +2202,15 @@ out_priv_stack:
kfree(jit_data);
prog->aux->jit_data = NULL;
}
-out:
- if (tmp_blinded)
- bpf_jit_prog_release_other(prog, prog == orig_prog ?
- tmp : orig_prog);
+
return prog;
out_free_hdr:
+ if (extra_pass) {
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ }
if (header) {
bpf_arch_text_copy(&ro_header->size, &header->size,
sizeof(header->size));