author		Alexei Starovoitov <ast@kernel.org>	2026-02-04 13:22:40 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2026-02-04 13:23:35 -0800
commit		b2821311abbd05d3340ad7f09fe89f088572b682 (patch)
tree		7cd5003a9c1361b74a280623464b22704415ddfd /kernel
parent		4af526698b15ec6cb3c8ad8085335b1d55692f00 (diff)
parent		56415363e02f0f561ecc5bda6a4318438f888b43 (diff)
Merge branch 'bpf-add-bitwise-tracking-for-bpf_end'
Tianci Cao says:

====================
bpf: Add bitwise tracking for BPF_END

Add bitwise tracking (tnum analysis) for BPF_END (`bswap(16|32|64)`,
`be(16|32|64)`, `le(16|32|64)`) operations. Please see commit log of
1/2 for more details.

v3:
- Resend to fix a version control error in v2.
- The rest of the changes are identical to v2.

v2 (incorrect): https://lore.kernel.org/bpf/20260204091146.52447-1-ziye@zju.edu.cn/
- Refactored selftests using BSWAP_RANGE_TEST macro to eliminate code
  duplication and improve maintainability. (Eduard)
- Simplified test names. (Eduard)
- Reduced excessive comments in test cases. (Eduard)
- Added more comments to explain BPF_END's special handling of
  zext_32_to_64.

v1: https://lore.kernel.org/bpf/20260202133536.66207-1-ziye@zju.edu.cn/
====================

Link: https://patch.msgid.link/20260204111503.77871-1-ziye@zju.edu.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
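As orientation for the series, here is a minimal userspace sketch (not kernel code; the struct and helper names are illustrative stand-ins for the kernel's struct tnum and swab16()). A tnum tracks every bit of a scalar as known-0, known-1, or unknown via a (value, mask) pair; a byte swap only permutes whole bytes, so swapping value and mask independently moves the per-bit knowledge along with each byte.

#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };	/* mask bit set => bit unknown */

/* userspace stand-in for the kernel's tnum_bswap16()/swab16() */
static struct tnum tnum_bswap16_sketch(struct tnum a)
{
	return (struct tnum){
		.value = __builtin_bswap16((uint16_t)a.value),
		.mask  = __builtin_bswap16((uint16_t)a.mask),
	};
}

int main(void)
{
	/* high byte known to be 0x12, low byte fully unknown: 0x12?? */
	struct tnum a = { .value = 0x1200, .mask = 0x00ff };
	struct tnum r = tnum_bswap16_sketch(a);

	/* prints value=0x12 mask=0xff00, i.e. 0x??12: knowledge moved with the byte */
	printf("value=%#llx mask=%#llx\n",
	       (unsigned long long)r.value, (unsigned long long)r.mask);
	return 0;
}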
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/tnum.c	16
-rw-r--r--	kernel/bpf/verifier.c	60
2 files changed, 73 insertions, 3 deletions
diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
index f8e70e9c3998..26fbfbb01700 100644
--- a/kernel/bpf/tnum.c
+++ b/kernel/bpf/tnum.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
#include <linux/tnum.h>
+#include <linux/swab.h>
#define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
/* A completely unknown value */
@@ -253,3 +254,18 @@ struct tnum tnum_const_subreg(struct tnum a, u32 value)
{
return tnum_with_subreg(a, tnum_const(value));
}
+
+struct tnum tnum_bswap16(struct tnum a)
+{
+ return TNUM(swab16(a.value & 0xFFFF), swab16(a.mask & 0xFFFF));
+}
+
+struct tnum tnum_bswap32(struct tnum a)
+{
+ return TNUM(swab32(a.value & 0xFFFFFFFF), swab32(a.mask & 0xFFFFFFFF));
+}
+
+struct tnum tnum_bswap64(struct tnum a)
+{
+ return TNUM(swab64(a.value), swab64(a.mask));
+}
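The soundness of these helpers is easy to check by brute force in userspace (an assumed harness, with __builtin_bswap16 standing in for the kernel's swab16()): every concrete value that fits the input tnum must, after swapping, fit the output tnum.

#include <assert.h>
#include <stdint.h>

struct tnum { uint64_t value; uint64_t mask; };

/* x fits t iff x agrees with t.value on all known (non-mask) bits */
static int tnum_contains(struct tnum t, uint64_t x)
{
	return (x & ~t.mask) == t.value;
}

int main(void)
{
	struct tnum a = { .value = 0x1200, .mask = 0x00ff };
	struct tnum swapped = {
		.value = __builtin_bswap16((uint16_t)a.value),
		.mask  = __builtin_bswap16((uint16_t)a.mask),
	};

	for (uint32_t x = 0; x <= 0xffff; x++)
		if (tnum_contains(a, x))
			assert(tnum_contains(swapped, __builtin_bswap16((uint16_t)x)));
	return 0;
}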
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 40a8252140fb..92e03a5a50f5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15832,6 +15832,48 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
__update_reg_bounds(dst_reg);
}
+static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *insn)
+{
+ /*
+ * Byte swap operation - update var_off using tnum_bswap.
+ * Three cases:
+ * 1. bswap(16|32|64): opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE)
+ * unconditional swap
+ * 2. to_le(16|32|64): opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE)
+ * swap on big-endian, truncation or no-op on little-endian
+ * 3. to_be(16|32|64): opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE)
+ * swap on little-endian, truncation or no-op on big-endian
+ */
+
+ bool alu64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ bool to_le = BPF_SRC(insn->code) == BPF_TO_LE;
+ bool is_big_endian;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ is_big_endian = true;
+#else
+ is_big_endian = false;
+#endif
+	/* Apply bswap for alu64 (unconditional), or when the target byte order differs from the host's */
+ bool need_bswap = alu64 || (to_le == is_big_endian);
+
+ if (need_bswap) {
+ if (insn->imm == 16)
+ dst_reg->var_off = tnum_bswap16(dst_reg->var_off);
+ else if (insn->imm == 32)
+ dst_reg->var_off = tnum_bswap32(dst_reg->var_off);
+ else if (insn->imm == 64)
+ dst_reg->var_off = tnum_bswap64(dst_reg->var_off);
+ /*
+ * Byteswap scrambles the range, so we must reset bounds.
+ * Bounds will be re-derived from the new tnum later.
+ */
+ __mark_reg_unbounded(dst_reg);
+ }
+ /* For bswap16/32, truncate dst register to match the swapped size */
+ if (insn->imm == 16 || insn->imm == 32)
+ coerce_reg_to_size(dst_reg, insn->imm / 8);
+}
+
static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
const struct bpf_reg_state *src_reg)
{
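The `to_le == is_big_endian` condition above is compact enough to deserve a truth table; this small hypothetical userspace harness mirrors the logic and makes each case explicit.

#include <stdbool.h>
#include <stdio.h>

/* mirrors scalar_byte_swap(): alu64 encodes the unconditional bswap form */
static bool need_bswap(bool alu64, bool to_le, bool is_big_endian)
{
	return alu64 || (to_le == is_big_endian);
}

int main(void)
{
	/* only conversions that cross the host byte order actually swap */
	printf("bswap, any host: %d\n", need_bswap(true,  true,  false)); /* 1 */
	printf("to_le on LE:     %d\n", need_bswap(false, true,  false)); /* 0 */
	printf("to_be on LE:     %d\n", need_bswap(false, false, false)); /* 1 */
	printf("to_le on BE:     %d\n", need_bswap(false, true,  true));  /* 1 */
	printf("to_be on BE:     %d\n", need_bswap(false, false, true));  /* 0 */
	return 0;
}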
@@ -15858,6 +15900,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
case BPF_XOR:
case BPF_OR:
case BPF_MUL:
+ case BPF_END:
return true;
/*
@@ -16047,12 +16090,23 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
else
scalar_min_max_arsh(dst_reg, &src_reg);
break;
+ case BPF_END:
+ scalar_byte_swap(dst_reg, insn);
+ break;
default:
break;
}
- /* ALU32 ops are zero extended into 64bit register */
- if (alu32)
+ /*
+ * ALU32 ops are zero extended into 64bit register.
+ *
+ * BPF_END is already handled inside the helper (truncation),
+ * so skip zext here to avoid unexpected zero extension.
+ * e.g., le64: opcode=(BPF_END|BPF_ALU|BPF_TO_LE), imm=0x40
+ * This is a 64bit byte swap operation with alu32==true,
+ * but we should not zero extend the result.
+ */
+ if (alu32 && opcode != BPF_END)
zext_32_to_64(dst_reg);
reg_bounds_sync(dst_reg);
return 0;
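The zext exception is the subtle part: le64/be64 are encoded with class BPF_ALU (so alu32 is true) yet operate on all 64 bits. A hypothetical userspace snippet illustrates why the usual ALU32 zero extension would be wrong here.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* le64 on a big-endian host performs a full 64-bit byte swap */
	uint64_t reg = 0x1122334455667788ULL;
	uint64_t swapped = __builtin_bswap64(reg);

	assert(swapped == 0x8877665544332211ULL);
	/* an ALU32-style zero extension would clobber the upper half */
	assert((uint32_t)swapped != swapped);
	return 0;
}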
@@ -16232,7 +16286,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check dest operand */
- if (opcode == BPF_NEG &&
+ if ((opcode == BPF_NEG || opcode == BPF_END) &&
regs[insn->dst_reg].type == SCALAR_VALUE) {
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
err = err ?: adjust_scalar_min_max_vals(env, insn,
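Putting the pieces together, a sketch (assumed userspace model, reusing the illustrative tnum struct from above) of the bounds the verifier can now derive for a bswap16: after the swap and the 2-byte coercion, the range follows directly from the swapped tnum.

#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };

int main(void)
{
	/* r0 before: bits 16-63 known zero, byte 1 = 0x12, byte 0 unknown */
	struct tnum r0 = { .value = 0x1200, .mask = 0x00ff };

	/* scalar_byte_swap(): swap value and mask, then coerce to 2 bytes */
	struct tnum swapped = {
		.value = __builtin_bswap16((uint16_t)r0.value),
		.mask  = __builtin_bswap16((uint16_t)r0.mask),
	};

	/* bounds re-derived from the tnum: unknown bits all-0 vs. all-1 */
	uint64_t umin = swapped.value;
	uint64_t umax = swapped.value | swapped.mask;
	printf("umin=%#llx umax=%#llx\n",	/* umin=0x12 umax=0xff12 */
	       (unsigned long long)umin, (unsigned long long)umax);
	return 0;
}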