Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/net/bpf_jit_32.c        |  25
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c    |  93
-rw-r--r--  arch/loongarch/net/bpf_jit.c     |  22
-rw-r--r--  arch/mips/net/bpf_jit_comp.c     |   3
-rw-r--r--  arch/parisc/net/bpf_jit_core.c   |   8
-rw-r--r--  arch/riscv/net/bpf_jit.h         |   2
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  | 203
-rw-r--r--  arch/riscv/net/bpf_jit_core.c    |   2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c     |   6
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c |   6
-rw-r--r--  arch/x86/net/bpf_jit_comp.c      | 102
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c    |   3
12 files changed, 425 insertions(+), 50 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 72b5cd697f5d..deeb8f292454 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -2252,28 +2252,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If building the body of the JITed code fails somehow,
* we fall back to the interpretation.
*/
- if (build_body(&ctx) < 0) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (build_body(&ctx) < 0)
+ goto out_free;
build_epilogue(&ctx);
/* 3.) Extra pass to validate JITed Code */
- if (validate_code(&ctx)) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (validate_code(&ctx))
+ goto out_free;
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
if (bpf_jit_enable > 1)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_free;
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
@@ -2290,5 +2283,11 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free:
+ image_ptr = NULL;
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_imms;
}
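Note: this arm hunk is representative of the lock_ro conversions in the files that follow. bpf_jit_binary_lock_ro() now returns an error (the underlying set_memory_ro()/set_memory_x() calls can fail), so callers must take a cleanup path instead of ignoring the result; the arm JIT also folds its three duplicated cleanup sequences into a single out_free label. A minimal standalone sketch of that pattern, where lock_ro() and free_header() are hypothetical stand-ins for the kernel helpers:

#include <stdio.h>

struct header { int locked; };

/* Hypothetical stand-ins for bpf_jit_binary_lock_ro()/_free();
 * make lock_ro() return nonzero to model a set_memory_ro() failure. */
static int lock_ro(struct header *h) { h->locked = 1; return 0; }
static void free_header(struct header *h) { (void)h; }

static int finalize_jit(struct header *h)
{
	if (lock_ro(h))
		goto out_free;	/* new: a lock failure takes the error path */
	return 0;

out_free:			/* one label replaces three copies of the cleanup */
	free_header(h);
	return -1;
}

int main(void)
{
	struct header h = { 0 };
	printf("finalize_jit: %d\n", finalize_jit(&h));
	return 0;
}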
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 13eac43c632d..cf9a9ebb4cda 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -29,6 +29,7 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
+#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
[FP_BOTTOM] = A64_R(27),
+ /* callee saved register for kern_vm_start address */
+ [ARENA_VM_START] = A64_R(28),
};
struct jit_ctx {
@@ -79,6 +82,7 @@ struct jit_ctx {
__le32 *ro_image;
u32 stack_size;
int fpb_offset;
+ u64 user_vm_start;
};
struct bpf_plt {
@@ -295,7 +299,7 @@ static bool is_lsi_offset(int offset, int scale)
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb)
+ bool is_exception_cb, u64 arena_vm_start)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +310,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const int idx0 = ctx->idx;
int cur_offset;
@@ -411,6 +416,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+ if (arena_vm_start)
+ emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+
return 0;
}
@@ -738,6 +747,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -745,7 +755,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
- regs->regs[dst_reg] = 0;
+ if (dst_reg != DONT_CLEAR)
+ regs->regs[dst_reg] = 0;
regs->pc = (unsigned long)&ex->fixup - offset;
return true;
}
@@ -765,7 +776,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
- BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32)
return 0;
if (!ctx->prog->aux->extable ||
@@ -810,6 +822,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
ex->insn = ins_offset;
+ if (BPF_CLASS(insn->code) != BPF_LDX)
+ dst_reg = DONT_CLEAR;
+
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
@@ -829,12 +844,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
bool extra_pass)
{
const u8 code = insn->code;
- const u8 dst = bpf2a64[insn->dst_reg];
- const u8 src = bpf2a64[insn->src_reg];
+ u8 dst = bpf2a64[insn->dst_reg];
+ u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -853,6 +869,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
+ emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
+ emit(A64_LSL(1, dst, dst, 32), ctx);
+ emit(A64_CBZ(1, tmp, 2), ctx);
+ emit(A64_ORR(1, tmp, dst, tmp), ctx);
+ emit(A64_MOV(1, dst, tmp), ctx);
+ break;
+ }
switch (insn->off) {
case 0:
emit(A64_MOV(is64, dst, src), ctx);
@@ -1237,7 +1262,15 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
- if (ctx->fpb_offset > 0 && src == fp) {
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
+ src = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
src_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1322,7 +1355,15 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1365,6 +1406,10 @@ emit_cond_jmp:
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;
/* STX: *(size *)(dst + off) = src */
@@ -1372,7 +1417,15 @@ emit_cond_jmp:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1413,6 +1466,10 @@ emit_cond_jmp:
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
@@ -1594,6 +1651,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
+ u64 arena_vm_start;
u8 *image_ptr;
u8 *ro_image_ptr;
@@ -1611,6 +1669,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
+ arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1641,6 +1700,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.fpb_offset = find_fpb_offset(prog);
+ ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
@@ -1648,7 +1708,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+ arena_vm_start)) {
prog = orig_prog;
goto out_off;
}
@@ -1696,7 +1757,7 @@ skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
@@ -2176,12 +2237,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
@@ -2464,6 +2522,11 @@ bool bpf_jit_supports_exceptions(void)
return true;
}
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
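Note on the insn_is_cast_user() sequence above: the six emitted instructions convert a 32-bit arena offset into a full user-space pointer while preserving NULL. The 32-bit move zero-extends the source, the upper half of user_vm_start is shifted into place, and CBZ skips the ORR when the offset is zero. A rough C model of what the sequence computes (the base value below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Models the arm64 sequence: A64_MOV(0,..) zero-extends, mov_i + LSL
 * builds the high half, CBZ jumps over the ORR for a NULL source. */
static uint64_t cast_user(uint64_t src, uint64_t user_vm_start)
{
	uint32_t lo = (uint32_t)src;               /* 32-bit mov clears upper bits */
	uint64_t hi = (user_vm_start >> 32) << 32; /* high half of the user base */

	return lo ? (hi | lo) : 0;                 /* NULL stays NULL */
}

int main(void)
{
	uint64_t base = 0x7f1234560000ULL;         /* made-up user_vm_start */

	printf("%#llx\n", (unsigned long long)cast_user(0x1000, base));
	printf("%#llx\n", (unsigned long long)cast_user(0, base));
	return 0;
}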
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e73323d759d0..7dbefd4ba210 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1294,16 +1294,19 @@ skip_init_ctx:
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
+ int err;
+
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
- prog->bpf_func = NULL;
- prog->jited = 0;
- prog->jited_len = 0;
- goto out_offset;
+ goto out_free;
+ }
+ err = bpf_jit_binary_lock_ro(header);
+ if (err) {
+ pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
+ err);
+ goto out_free;
}
- bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
@@ -1334,6 +1337,13 @@ out:
out_offset = -1;
return prog;
+
+out_free:
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
index a40d926b6513..e355dfca4400 100644
--- a/arch/mips/net/bpf_jit_comp.c
+++ b/arch/mips/net/bpf_jit_comp.c
@@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
/* Set as read-only exec and flush instruction cache */
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_err;
flush_icache_range((unsigned long)header,
(unsigned long)&ctx.target[ctx.jit_index]);
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index d6ee2fd45550..979f45d4d1fb 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -167,7 +167,13 @@ skip_init_ctx:
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(jit_data->header);
+ if (bpf_jit_binary_lock_ro(jit_data->header)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
+ }
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] += prologue_len;
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index f4b6b3b9edda..5fc374ed98ea 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -81,6 +81,8 @@ struct rv_jit_context {
int nexentries;
unsigned long flags;
int stack_size;
+ u64 arena_vm_start;
+ u64 user_vm_start;
};
/* Convert from ninsns to bytes. */
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index ec9d692838fc..e713704be837 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -18,6 +18,7 @@
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
+#define RV_REG_ARENA RV_REG_S7 /* For storing arena_vm_start */
static const int regmap[] = {
[BPF_REG_0] = RV_REG_A5,
@@ -255,6 +256,10 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_ld(RV_REG_ARENA, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
@@ -548,6 +553,7 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define REG_DONT_CLEAR_MARKER 0 /* RV_REG_ZERO unused in pt_regmap */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -555,7 +561,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
- *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
+ if (regs_offset != REG_DONT_CLEAR_MARKER)
+ *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
return true;
@@ -572,7 +579,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
off_t fixup_offset;
if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
- (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
+ (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32))
return 0;
if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
@@ -1073,6 +1081,15 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ emit_mv(RV_REG_T1, rs, ctx);
+ emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
+ emit_imm(rd, (ctx->user_vm_start >> 32) << 32, ctx);
+ emit(rv_beq(RV_REG_T1, RV_REG_ZERO, 4), ctx);
+ emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
+ emit_mv(rd, RV_REG_T1, ctx);
+ break;
+ }
if (imm == 1) {
/* Special mov32 for zext */
emit_zextw(rd, rd, ctx);
@@ -1539,6 +1556,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ /* LDX | PROBE_MEM32: dst = *(unsigned size *)(src + RV_REG_ARENA + off) */
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
{
int insn_len, insns_start;
bool sign_ext;
@@ -1546,6 +1568,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit_add(RV_REG_T2, rs, RV_REG_ARENA, ctx);
+ rs = RV_REG_T2;
+ }
+
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_12b_int(off)) {
@@ -1682,6 +1709,86 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T3, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T3;
+
+ /* Load imm to a register then store it */
+ emit_imm(RV_REG_T1, imm, ctx);
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
if (is_12b_int(off)) {
@@ -1728,6 +1835,84 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_atomic(rd, rs, off, imm,
BPF_SIZE(code) == BPF_DW, ctx);
break;
+
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T2, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T2;
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
@@ -1759,6 +1944,8 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
stack_adjust += 8;
if (seen_reg(RV_REG_S6, ctx))
stack_adjust += 8;
+ if (ctx->arena_vm_start)
+ stack_adjust += 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
@@ -1810,6 +1997,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_ARENA, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
@@ -1823,6 +2014,9 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
ctx->stack_size = stack_adjust;
+
+ if (ctx->arena_vm_start)
+ emit_imm(RV_REG_ARENA, ctx->arena_vm_start, ctx);
}
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
@@ -1839,3 +2033,8 @@ bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
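Note on the BPF_PROBE_MEM32 handling above: the JIT materializes the arena mapping's kernel base in a callee-saved register (S7 here, R28 on arm64, R12 on x86), adds it to the 32-bit arena pointer before each access, and registers the access in the exception table. On a fault, a load's destination register is zeroed; a store clobbers nothing, which is what the REG_DONT_CLEAR_MARKER fixup encodes. A rough C model of the load path, where fault_on() is a hypothetical stand-in for the page-fault/extable machinery:

#include <stdint.h>

/* Hypothetical stand-in for "the access faulted and the extable fixup
 * ran" -- in the kernel this is a real page fault, not an explicit check. */
static int fault_on(const void *addr) { return addr == 0; }

static uint64_t probe_mem32_load(uint32_t src, int16_t off,
				 uint64_t arena_vm_start)
{
	const uint64_t *kaddr =
		(const uint64_t *)(arena_vm_start + src + off);

	if (fault_on(kaddr))
		return 0;	/* ex_handler_bpf() zeroes dst for loads */
	return *kaddr;		/* stores skip the clearing: marker register */
}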
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 6b3acac30c06..8a69d6d81e32 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -80,6 +80,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto skip_init_ctx;
}
+ ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
+ ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx->prog = prog;
ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (!ctx->offset) {
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5af0402e94b8..fa2f824e3b06 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2108,7 +2108,11 @@ skip_init_ctx:
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (!fp->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ fp = orig_fp;
+ goto free_addrs;
+ }
} else {
jit_data->header = header;
jit_data->ctx = jit;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index fa0759bfe498..73bf0aea8baf 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1602,7 +1602,11 @@ skip_init_ctx:
bpf_flush_icache(header, (u8 *)header + header->size);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_off;
+ }
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 59cbc94b6e69..5159c7a22922 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -816,9 +816,10 @@ done:
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
+ u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
u8 *prog = *pprog;
- if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
+ if (is_uimm32(imm64)) {
/*
* For emitting plain u32, where sign bit must not be
* propagated LLVM tends to load imm64 over mov32
@@ -826,6 +827,8 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
* 'mov %eax, imm32' instead.
*/
emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
+ } else if (is_simm32(imm64)) {
+ emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
} else {
/* movabsq rax, imm64 */
EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
@@ -1169,6 +1172,54 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
return 0;
}
+static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
+ u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ EMIT1(0xF0); /* lock prefix */
+ switch (size) {
+ case BPF_W:
+ EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
+ break;
+ case BPF_DW:
+ EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
+ break;
+ default:
+ pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
+ return -EFAULT;
+ }
+
+ /* emit opcode */
+ switch (atomic_op) {
+ case BPF_ADD:
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
+ EMIT1(simple_alu_opcodes[atomic_op]);
+ break;
+ case BPF_ADD | BPF_FETCH:
+ /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
+ EMIT2(0x0F, 0xC1);
+ break;
+ case BPF_XCHG:
+ /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
+ EMIT1(0x87);
+ break;
+ case BPF_CMPXCHG:
+ /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
+ EMIT2(0x0F, 0xB1);
+ break;
+ default:
+ pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+ return -EFAULT;
+ }
+ emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
+ *pprog = prog;
+ return 0;
+}
+
#define DONT_CLEAR 1
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
@@ -1351,8 +1402,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
- if (insn->off == BPF_ADDR_SPACE_CAST &&
- insn->imm == 1U << 16) {
+ if (insn_is_cast_user(insn)) {
if (dst_reg != src_reg)
/* 32-bit mov */
emit_mov_reg(&prog, false, dst_reg, src_reg);
@@ -1383,6 +1433,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
break;
+ } else if (insn_is_mov_percpu_addr(insn)) {
+ /* mov <dst>, <src> (if necessary) */
+ EMIT_mov(dst_reg, src_reg);
+#ifdef CONFIG_SMP
+ /* add <dst>, gs:[<off>] */
+ EMIT2(0x65, add_1mod(0x48, dst_reg));
+ EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
+ EMIT((u32)(unsigned long)&this_cpu_off, 4);
+#endif
+ break;
}
fallthrough;
case BPF_ALU | BPF_MOV | BPF_X:
@@ -1963,6 +2023,15 @@ populate_extable:
return err;
break;
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+ start_of_ldx = prog;
+ err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
+ dst_reg, src_reg, X86_REG_R12, insn->off);
+ if (err)
+ return err;
+ goto populate_extable;
+
/* call */
case BPF_JMP | BPF_CALL: {
u8 *ip = image + addrs[i - 1];
@@ -2994,12 +3063,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
@@ -3359,6 +3425,11 @@ bool bpf_jit_supports_subprog_tailcalls(void)
return true;
}
+bool bpf_jit_supports_percpu_insn(void)
+{
+ return true;
+}
+
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
@@ -3462,6 +3533,21 @@ bool bpf_jit_supports_arena(void)
return true;
}
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+ if (!in_arena)
+ return true;
+ switch (insn->code) {
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm == (BPF_AND | BPF_FETCH) ||
+ insn->imm == (BPF_OR | BPF_FETCH) ||
+ insn->imm == (BPF_XOR | BPF_FETCH))
+ return false;
+ }
+ return true;
+}
+
bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
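Note on emit_atomic_index() above: it emits the same lock-prefixed opcodes as emit_atomic(), but with a SIB addressing mode so the arena base held in R12 (see the BPF_PROBE_ATOMIC cases) rides along as the index register, and the access is covered by the exception table via the populate_extable path. A rough model of the BPF_ADD | BPF_FETCH case, using the compiler's atomic builtin as a stand-in for the emitted "lock xadd":

#include <stdint.h>

/* Effective address is base + index + displacement in the emitted SIB
 * byte, i.e. dst + arena_base + off; __atomic_fetch_add models xadd. */
static uint64_t arena_fetch_add(uint64_t *arena_base, uint32_t dst,
				int32_t off, uint64_t val)
{
	uint64_t *addr = (uint64_t *)((uint8_t *)arena_base + dst + off);

	return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
}

This also matches why bpf_jit_supports_insn() rejects the fetch variants of AND/OR/XOR inside an arena: x86 lowers those to a cmpxchg loop rather than a single lockable instruction, which would not fit the one-fixup-per-access exception scheme.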
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index c10083a8e68e..de0f9e5f9f73 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2600,8 +2600,7 @@ out_image:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
- if (image) {
- bpf_jit_binary_lock_ro(header);
+ if (image && !bpf_jit_binary_lock_ro(header)) {
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;