Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/include/uapi/asm/socket.h |   2
-rw-r--r--	arch/mips/net/ebpf_jit.c            | 163
2 files changed, 106 insertions, 59 deletions
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 882823bec153..6c755bc07975 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -120,4 +120,6 @@
 
 #define SO_PEERGROUPS		59
 
+#define SO_ZEROCOPY		60
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 3f87b96da5c4..7646891c4e9b 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -113,6 +113,7 @@ struct jit_ctx {
 	u64 *reg_val_types;
 	unsigned int long_b_conversion:1;
 	unsigned int gen_b_offsets:1;
+	unsigned int use_bbit_insns:1;
 };
 
 static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
@@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 	return build_int_epilogue(ctx, MIPS_R_T9);
 }
 
-static bool use_bbit_insns(void)
-{
-	switch (current_cpu_type()) {
-	case CPU_CAVIUM_OCTEON:
-	case CPU_CAVIUM_OCTEON_PLUS:
-	case CPU_CAVIUM_OCTEON2:
-	case CPU_CAVIUM_OCTEON3:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static bool is_bad_offset(int b_off)
 {
 	return b_off > 0x1ffff || b_off < -0x20000;
@@ -682,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	unsigned int target;
 	u64 t64;
 	s64 t64s;
+	int bpf_op = BPF_OP(insn->code);
 
 	switch (insn->code) {
 	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
@@ -770,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		emit_instr(ctx, sll, dst, dst, 0);
 		if (insn->imm == 1) {
 			/* div by 1 is a nop, mod by 1 is zero */
-			if (BPF_OP(insn->code) == BPF_MOD)
+			if (bpf_op == BPF_MOD)
 				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
 			break;
 		}
 		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
 		emit_instr(ctx, divu, dst, MIPS_R_AT);
-		if (BPF_OP(insn->code) == BPF_DIV)
+		if (bpf_op == BPF_DIV)
 			emit_instr(ctx, mflo, dst);
 		else
 			emit_instr(ctx, mfhi, dst);
@@ -798,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 
 		if (insn->imm == 1) {
 			/* div by 1 is a nop, mod by 1 is zero */
-			if (BPF_OP(insn->code) == BPF_MOD)
+			if (bpf_op == BPF_MOD)
 				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
 			break;
 		}
 		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
 		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
-		if (BPF_OP(insn->code) == BPF_DIV)
+		if (bpf_op == BPF_DIV)
 			emit_instr(ctx, mflo, dst);
 		else
 			emit_instr(ctx, mfhi, dst);
@@ -829,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
 		did_move = false;
 		if (insn->src_reg == BPF_REG_10) {
-			if (BPF_OP(insn->code) == BPF_MOV) {
+			if (bpf_op == BPF_MOV) {
 				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
 				did_move = true;
 			} else {
@@ -839,7 +828,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
 			int tmp_reg = MIPS_R_AT;
 
-			if (BPF_OP(insn->code) == BPF_MOV) {
+			if (bpf_op == BPF_MOV) {
 				tmp_reg = dst;
 				did_move = true;
 			}
@@ -847,7 +836,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
 			src = MIPS_R_AT;
 		}
-		switch (BPF_OP(insn->code)) {
+		switch (bpf_op) {
 		case BPF_MOV:
 			if (!did_move)
 				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
@@ -879,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
 			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 			emit_instr(ctx, ddivu, dst, src);
-			if (BPF_OP(insn->code) == BPF_DIV)
+			if (bpf_op == BPF_DIV)
 				emit_instr(ctx, mflo, dst);
 			else
 				emit_instr(ctx, mfhi, dst);
@@ -923,7 +912,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
 			int tmp_reg = MIPS_R_AT;
 
-			if (BPF_OP(insn->code) == BPF_MOV) {
+			if (bpf_op == BPF_MOV) {
 				tmp_reg = dst;
 				did_move = true;
 			}
@@ -931,7 +920,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, sll, tmp_reg, src, 0);
 			src = MIPS_R_AT;
 		}
-		switch (BPF_OP(insn->code)) {
+		switch (bpf_op) {
 		case BPF_MOV:
 			if (!did_move)
 				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
@@ -962,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
 			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
 			emit_instr(ctx, divu, dst, src);
-			if (BPF_OP(insn->code) == BPF_DIV)
+			if (bpf_op == BPF_DIV)
 				emit_instr(ctx, mflo, dst);
 			else
 				emit_instr(ctx, mfhi, dst);
@@ -989,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		break;
 	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
 	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
-		cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+		cmp_eq = (bpf_op == BPF_JEQ);
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
 		if (dst < 0)
 			return dst;
@@ -1002,8 +991,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		goto jeq_common;
 	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
 	case BPF_JMP | BPF_JNE | BPF_X:
+	case BPF_JMP | BPF_JSLT | BPF_X:
+	case BPF_JMP | BPF_JSLE | BPF_X:
 	case BPF_JMP | BPF_JSGT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
+	case BPF_JMP | BPF_JLT | BPF_X:
+	case BPF_JMP | BPF_JLE | BPF_X:
 	case BPF_JMP | BPF_JGT | BPF_X:
 	case BPF_JMP | BPF_JGE | BPF_X:
 	case BPF_JMP | BPF_JSET | BPF_X:
@@ -1020,33 +1013,39 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
 			dst = MIPS_R_AT;
 		}
-		if (BPF_OP(insn->code) == BPF_JSET) {
+		if (bpf_op == BPF_JSET) {
 			emit_instr(ctx, and, MIPS_R_AT, dst, src);
 			cmp_eq = false;
 			dst = MIPS_R_AT;
 			src = MIPS_R_ZERO;
-		} else if (BPF_OP(insn->code) == BPF_JSGT) {
+		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
 			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
 			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
 				b_off = b_imm(exit_idx, ctx);
 				if (is_bad_offset(b_off))
 					return -E2BIG;
-				emit_instr(ctx, blez, MIPS_R_AT, b_off);
+				if (bpf_op == BPF_JSGT)
+					emit_instr(ctx, blez, MIPS_R_AT, b_off);
+				else
+					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
 				emit_instr(ctx, nop);
 				return 2; /* We consumed the exit. */
 			}
 			b_off = b_imm(this_idx + insn->off + 1, ctx);
 			if (is_bad_offset(b_off))
 				return -E2BIG;
-			emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+			if (bpf_op == BPF_JSGT)
+				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+			else
+				emit_instr(ctx, blez, MIPS_R_AT, b_off);
 			emit_instr(ctx, nop);
 			break;
-		} else if (BPF_OP(insn->code) == BPF_JSGE) {
+		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
 			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
-			cmp_eq = true;
+			cmp_eq = bpf_op == BPF_JSGE;
 			dst = MIPS_R_AT;
 			src = MIPS_R_ZERO;
-		} else if (BPF_OP(insn->code) == BPF_JGT) {
+		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
 			/* dst or src could be AT */
 			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
 			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
@@ -1054,16 +1053,16 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
 			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
 			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
-			cmp_eq = true;
+			cmp_eq = bpf_op == BPF_JGT;
 			dst = MIPS_R_AT;
 			src = MIPS_R_ZERO;
-		} else if (BPF_OP(insn->code) == BPF_JGE) {
+		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
 			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
-			cmp_eq = true;
+			cmp_eq = bpf_op == BPF_JGE;
 			dst = MIPS_R_AT;
 			src = MIPS_R_ZERO;
 		} else { /* JNE/JEQ case */
-			cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+			cmp_eq = (bpf_op == BPF_JEQ);
 		}
 jeq_common:
 		/*
@@ -1122,7 +1121,9 @@ jeq_common:
 		break;
 	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
 	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
-		cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
+	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
+	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
+		cmp_eq = (bpf_op == BPF_JSGE);
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
 		if (dst < 0)
 			return dst;
@@ -1132,65 +1133,92 @@ jeq_common:
 				b_off = b_imm(exit_idx, ctx);
 				if (is_bad_offset(b_off))
 					return -E2BIG;
-				if (cmp_eq)
-					emit_instr(ctx, bltz, dst, b_off);
-				else
+				switch (bpf_op) {
+				case BPF_JSGT:
 					emit_instr(ctx, blez, dst, b_off);
+					break;
+				case BPF_JSGE:
+					emit_instr(ctx, bltz, dst, b_off);
+					break;
+				case BPF_JSLT:
+					emit_instr(ctx, bgez, dst, b_off);
+					break;
+				case BPF_JSLE:
+					emit_instr(ctx, bgtz, dst, b_off);
+					break;
+				}
 				emit_instr(ctx, nop);
 				return 2; /* We consumed the exit. */
 			}
 			b_off = b_imm(this_idx + insn->off + 1, ctx);
 			if (is_bad_offset(b_off))
 				return -E2BIG;
-			if (cmp_eq)
-				emit_instr(ctx, bgez, dst, b_off);
-			else
+			switch (bpf_op) {
+			case BPF_JSGT:
 				emit_instr(ctx, bgtz, dst, b_off);
+				break;
+			case BPF_JSGE:
+				emit_instr(ctx, bgez, dst, b_off);
+				break;
+			case BPF_JSLT:
+				emit_instr(ctx, bltz, dst, b_off);
+				break;
+			case BPF_JSLE:
+				emit_instr(ctx, blez, dst, b_off);
+				break;
+			}
 			emit_instr(ctx, nop);
 			break;
 		}
 		/*
 		 * only "LT" compare available, so we must use imm + 1
-		 * to generate "GT"
+		 * to generate "GT" and imm -1 to generate LE
 		 */
-		t64s = insn->imm + (cmp_eq ? 0 : 1);
+		if (bpf_op == BPF_JSGT)
+			t64s = insn->imm + 1;
+		else if (bpf_op == BPF_JSLE)
+			t64s = insn->imm + 1;
+		else
+			t64s = insn->imm;
+
+		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
 		if (t64s >= S16_MIN && t64s <= S16_MAX) {
 			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
 			src = MIPS_R_AT;
 			dst = MIPS_R_ZERO;
-			cmp_eq = true;
 			goto jeq_common;
 		}
 		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
 		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
 		src = MIPS_R_AT;
 		dst = MIPS_R_ZERO;
-		cmp_eq = true;
 		goto jeq_common;
 
 	case BPF_JMP | BPF_JGT | BPF_K:
 	case BPF_JMP | BPF_JGE | BPF_K:
-		cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
+	case BPF_JMP | BPF_JLT | BPF_K:
+	case BPF_JMP | BPF_JLE | BPF_K:
+		cmp_eq = (bpf_op == BPF_JGE);
 		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
 		if (dst < 0)
 			return dst;
 		/*
 		 * only "LT" compare available, so we must use imm + 1
-		 * to generate "GT"
+		 * to generate "GT" and imm -1 to generate LE
 		 */
-		t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
-		if (t64s >= 0 && t64s <= S16_MAX) {
-			emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
-			src = MIPS_R_AT;
-			dst = MIPS_R_ZERO;
-			cmp_eq = true;
-			goto jeq_common;
-		}
+		if (bpf_op == BPF_JGT)
+			t64s = (u64)(u32)(insn->imm) + 1;
+		else if (bpf_op == BPF_JLE)
+			t64s = (u64)(u32)(insn->imm) + 1;
+		else
+			t64s = (u64)(u32)(insn->imm);
+
+		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
+
 		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
 		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
 		src = MIPS_R_AT;
 		dst = MIPS_R_ZERO;
-		cmp_eq = true;
 		goto jeq_common;
 
 	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
@@ -1198,7 +1226,7 @@ jeq_common:
 		if (dst < 0)
 			return dst;
 
-		if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
+		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
 			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
 				b_off = b_imm(exit_idx, ctx);
 				if (is_bad_offset(b_off))
@@ -1724,10 +1752,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
 			case BPF_JEQ:
 			case BPF_JGT:
 			case BPF_JGE:
+			case BPF_JLT:
+			case BPF_JLE:
 			case BPF_JSET:
 			case BPF_JNE:
 			case BPF_JSGT:
 			case BPF_JSGE:
+			case BPF_JSLT:
+			case BPF_JSLE:
 				if (follow_taken) {
 					rvt[idx] |= RVT_BRANCH_TAKEN;
 					idx += insn->off;
@@ -1853,6 +1885,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	memset(&ctx, 0, sizeof(ctx));
 
+	preempt_disable();
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
+	case CPU_CAVIUM_OCTEON3:
+		ctx.use_bbit_insns = 1;
+		break;
+	default:
+		ctx.use_bbit_insns = 0;
+	}
+	preempt_enable();
+
 	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
 	if (ctx.offsets == NULL)
 		goto out_err;
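Editorial note (not part of the patch above): the reworked BPF_JSGT/JSGE/JSLT/JSLE | BPF_K hunk relies on MIPS offering only a "set on less than" compare (slt/slti). "Greater than" and "less or equal" against an immediate are both derived from the same slti by bumping the immediate to imm + 1 and then choosing whether jeq_common branches on the slti result being zero (cmp_eq set for JSGT/JSGE) or non-zero (JSLT/JSLE). The standalone userspace C sketch below only checks that arithmetic; the helper names (slti_model, lowered_branch_taken) are illustrative and do not exist in the kernel.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of "slti at, dst, imm": at = (dst < imm), signed compare. */
static int slti_model(int64_t dst, int64_t imm)
{
	return dst < imm ? 1 : 0;
}

/*
 * Mirror of the lowering in the BPF_JSGT/JSGE/JSLT/JSLE | BPF_K hunk:
 * GT and LE use imm + 1, GE and LT use imm unchanged; GT/GE branch
 * when the slti result is zero (cmp_eq), LT/LE when it is non-zero.
 */
static bool lowered_branch_taken(const char *op, int64_t dst, int32_t imm)
{
	int64_t t64s;
	bool cmp_eq;
	int at;

	if (!strcmp(op, "jsgt") || !strcmp(op, "jsle"))
		t64s = (int64_t)imm + 1;
	else
		t64s = imm;

	cmp_eq = !strcmp(op, "jsgt") || !strcmp(op, "jsge");

	at = slti_model(dst, t64s);
	return cmp_eq ? at == 0 : at != 0;
}

int main(void)
{
	const int64_t vals[] = { INT64_MIN, -7, -1, 0, 1, 6, 7, 8, INT64_MAX };
	const int32_t imms[] = { -8, -1, 0, 1, 7, INT32_MAX };

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		for (size_t j = 0; j < sizeof(imms) / sizeof(imms[0]); j++) {
			int64_t d = vals[i];
			int32_t k = imms[j];

			assert(lowered_branch_taken("jsgt", d, k) == (d > k));
			assert(lowered_branch_taken("jsge", d, k) == (d >= k));
			assert(lowered_branch_taken("jslt", d, k) == (d < k));
			assert(lowered_branch_taken("jsle", d, k) == (d <= k));
		}
	}
	printf("signed compare lowering checks passed\n");
	return 0;
}

The unsigned BPF_JGT/JGE/JLT/JLE | BPF_K hunk applies the same imm + 1 adjustment to a zero-extended immediate and uses sltu instead of slti.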
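A second editorial note: the patch also drops the per-instruction use_bbit_insns() helper and instead latches the answer once per compile into ctx.use_bbit_insns, sampling current_cpu_type() inside a preempt_disable()/preempt_enable() pair; the wrapping suggests the intent is to avoid querying the current CPU's type from preemptible context on every BPF_JSET instruction. The flag only matters when the JSET immediate has exactly one set bit (hweight32((u32)insn->imm) == 1), because that is the case a single branch-on-bit instruction (Octeon bbit0/bbit1) can test directly. The userspace sketch below, again with illustrative names and GCC/Clang builtins standing in for hweight32(), just demonstrates that single-bit equivalence.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's hweight32(). */
static unsigned int popcount32(uint32_t x)
{
	return (unsigned int)__builtin_popcount(x);
}

/*
 * When a BPF_JSET immediate has exactly one bit set, "dst & imm" is
 * non-zero iff that single bit of dst is set, which is exactly what a
 * branch-on-bit instruction tests.
 */
int main(void)
{
	const uint32_t imms[] = { 0x1, 0x80, 0x40000000, 0x6, 0x0 };
	const uint32_t vals[] = { 0x0, 0x1, 0x81, 0xdeadbeef, 0x40000000 };

	for (size_t i = 0; i < sizeof(imms) / sizeof(imms[0]); i++) {
		uint32_t imm = imms[i];

		if (popcount32(imm) != 1)
			continue; /* JIT falls back to and + branch */

		unsigned int bit = (unsigned int)__builtin_ctz(imm);

		for (size_t j = 0; j < sizeof(vals) / sizeof(vals[0]); j++) {
			uint32_t v = vals[j];
			bool jset_taken = (v & imm) != 0;
			bool bbit_taken = (v >> bit) & 1;

			assert(jset_taken == bbit_taken);
		}
	}
	printf("single-bit JSET check passed\n");
	return 0;
}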