author     Eduard Zingerman <eddyz87@gmail.com>    2025-09-18 19:18:42 -0700
committer  Alexei Starovoitov <ast@kernel.org>     2025-09-19 09:27:23 -0700
commit     107e169799057bc6a379ddb625cbe1e51cfc7d72 (patch)
tree       a1d76c8f2246d90695850de0453485b9a6ae924e /kernel
parent     ccf25a67c7e29cfa6815d193054789b45ef825ad (diff)
bpf: disable and remove registers chain based liveness
Remove register chain based liveness tracking:
- struct bpf_reg_state->{parent,live} fields are no longer needed;
- REG_LIVE_WRITTEN marks are superseded by bpf_mark_stack_write() calls;
- mark_reg_read() calls are superseded by bpf_mark_stack_read();
- log.c:print_liveness() is superseded by logging in liveness.c;
- propagate_liveness() is superseded by bpf_update_live_stack();
- no need to establish register chains in is_state_visited() anymore;
- fix a bunch of tests expecting "_w" suffixes in verifier log messages.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-9-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
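For orientation, a minimal sketch of the replacement pattern this commit applies at its call sites. The wrapper function below is hypothetical and only illustrative; the two bpf_mark_stack_*() call shapes are copied from the hunks in this diff, where they take the place of the removed REG_LIVE_WRITTEN assignments and mark_reg_read() walks:

	/* Sketch only: instead of setting REG_LIVE_WRITTEN on spilled_ptr and
	 * walking the parentage chain via mark_reg_read(), the verifier now
	 * records per-frame stack-slot bitmasks through the liveness.c API.
	 */
	static int mark_two_slot_object(struct bpf_verifier_env *env,
					struct bpf_func_state *state, int spi)
	{
		/* write marks for a pair of slots, e.g. a dynptr occupying spi-1..spi */
		bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));

		/* read mark for a single slot at the current instruction */
		return bpf_mark_stack_read(env, state->frameno, env->insn_idx, BIT(spi));
	}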
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/log.c	26
-rw-r--r--	kernel/bpf/verifier.c	315
2 files changed, 25 insertions, 316 deletions
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 0d6d7bfb2fd0..f50533169cc3 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -542,17 +542,6 @@ static char slot_type_char[] = {
[STACK_IRQ_FLAG] = 'f'
};
-static void print_liveness(struct bpf_verifier_env *env,
- enum bpf_reg_liveness live)
-{
- if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
- verbose(env, "_");
- if (live & REG_LIVE_READ)
- verbose(env, "r");
- if (live & REG_LIVE_WRITTEN)
- verbose(env, "w");
-}
-
#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN
@@ -770,7 +759,6 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
if (!print_all && !reg_scratched(env, i))
continue;
verbose(env, " R%d", i);
- print_liveness(env, reg->live);
verbose(env, "=");
print_reg_state(env, state, reg);
}
@@ -803,9 +791,7 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
break;
types_buf[j] = '\0';
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=%s", types_buf);
+ verbose(env, " fp%d=%s", (-i - 1) * BPF_REG_SIZE, types_buf);
print_reg_state(env, state, reg);
break;
case STACK_DYNPTR:
@@ -814,7 +800,6 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
reg = &state->stack[i].spilled_ptr;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
if (reg->id)
verbose_a("id=%d", reg->id);
@@ -829,9 +814,8 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
if (!reg->ref_obj_id)
continue;
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
+ verbose(env, " fp%d=iter_%s(ref_id=%d,state=%s,depth=%u)",
+ (-i - 1) * BPF_REG_SIZE,
iter_type_str(reg->iter.btf, reg->iter.btf_id),
reg->ref_obj_id, iter_state_str(reg->iter.state),
reg->iter.depth);
@@ -839,9 +823,7 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
case STACK_MISC:
case STACK_ZERO:
default:
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=%s", types_buf);
+ verbose(env, " fp%d=%s", (-i - 1) * BPF_REG_SIZE, types_buf);
break;
}
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f70e34a38c13..e1da2471442b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -787,8 +787,6 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
}
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
- state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
return 0;
@@ -806,29 +804,6 @@ static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_stat
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
- /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
- *
- * While we don't allow reading STACK_INVALID, it is still possible to
- * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
- * helpers or insns can do partial read of that part without failing,
- * but check_stack_range_initialized, check_stack_read_var_off, and
- * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
- * the slot conservatively. Hence we need to prevent those liveness
- * marking walks.
- *
- * This was not a problem before because STACK_INVALID is only set by
- * default (where the default reg state has its reg->parent as NULL), or
- * in clean_live_states after REG_LIVE_DONE (at which point
- * mark_reg_read won't walk reg->parent chain), but not randomly during
- * verifier state exploration (like we did above). Hence, for our case
- * parentage chain will still be live (i.e. reg->parent may be
- * non-NULL), while earlier reg->parent was NULL, so we need
- * REG_LIVE_WRITTEN to screen off read marker propagation when it is
- * done later on reads or by mark_dynptr_read as well to unnecessary
- * mark registers in verifier state.
- */
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
- state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
}
@@ -938,9 +913,6 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
- /* Same reason as unmark_stack_slots_dynptr above */
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
- state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
return 0;
@@ -1059,7 +1031,6 @@ static int mark_stack_slots_iter(struct bpf_verifier_env *env,
else
st->type |= PTR_UNTRUSTED;
}
- st->live |= REG_LIVE_WRITTEN;
st->ref_obj_id = i == 0 ? id : 0;
st->iter.btf = btf;
st->iter.btf_id = btf_id;
@@ -1095,9 +1066,6 @@ static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
__mark_reg_not_init(env, st);
- /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
- st->live |= REG_LIVE_WRITTEN;
-
for (j = 0; j < BPF_REG_SIZE; j++)
slot->slot_type[j] = STACK_INVALID;
@@ -1194,7 +1162,6 @@ static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
bpf_mark_stack_write(env, reg->frameno, BIT(spi));
__mark_reg_known_zero(st);
st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
- st->live |= REG_LIVE_WRITTEN;
st->ref_obj_id = id;
st->irq.kfunc_class = kfunc_class;
@@ -1248,8 +1215,6 @@ static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_r
__mark_reg_not_init(env, st);
- /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
- st->live |= REG_LIVE_WRITTEN;
bpf_mark_stack_write(env, reg->frameno, BIT(spi));
for (i = 0; i < BPF_REG_SIZE; i++)
@@ -2901,8 +2866,6 @@ static void init_reg_state(struct bpf_verifier_env *env,
for (i = 0; i < MAX_BPF_REG; i++) {
mark_reg_not_init(env, regs, i);
- regs[i].live = REG_LIVE_NONE;
- regs[i].parent = NULL;
regs[i].subreg_def = DEF_NOT_SUBREG;
}
@@ -3583,64 +3546,12 @@ next:
return 0;
}
-/* Parentage chain of this register (or stack slot) should take care of all
- * issues like callee-saved registers, stack slot allocation time, etc.
- */
-static int mark_reg_read(struct bpf_verifier_env *env,
- const struct bpf_reg_state *state,
- struct bpf_reg_state *parent, u8 flag)
-{
- bool writes = parent == state->parent; /* Observe write marks */
- int cnt = 0;
-
- while (parent) {
- /* if read wasn't screened by an earlier write ... */
- if (writes && state->live & REG_LIVE_WRITTEN)
- break;
- /* The first condition is more likely to be true than the
- * second, checked it first.
- */
- if ((parent->live & REG_LIVE_READ) == flag ||
- parent->live & REG_LIVE_READ64)
- /* The parentage chain never changes and
- * this parent was already marked as LIVE_READ.
- * There is no need to keep walking the chain again and
- * keep re-marking all parents as LIVE_READ.
- * This case happens when the same register is read
- * multiple times without writes into it in-between.
- * Also, if parent has the stronger REG_LIVE_READ64 set,
- * then no need to set the weak REG_LIVE_READ32.
- */
- break;
- /* ... then we depend on parent's value */
- parent->live |= flag;
- /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
- if (flag == REG_LIVE_READ64)
- parent->live &= ~REG_LIVE_READ32;
- state = parent;
- parent = state->parent;
- writes = true;
- cnt++;
- }
-
- if (env->longest_mark_read_walk < cnt)
- env->longest_mark_read_walk = cnt;
- return 0;
-}
-
static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
int spi, int nr_slots)
{
- struct bpf_func_state *state = func(env, reg);
int err, i;
for (i = 0; i < nr_slots; i++) {
- struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
-
- err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
- if (err)
- return err;
-
err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi - i));
if (err)
return err;
@@ -3852,15 +3763,13 @@ static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *r
if (rw64)
mark_insn_zext(env, reg);
- return mark_reg_read(env, reg, reg->parent,
- rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
+ return 0;
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose(env, "frame pointer is read only\n");
return -EACCES;
}
- reg->live |= REG_LIVE_WRITTEN;
reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
@@ -5065,12 +4974,7 @@ static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{
- struct bpf_reg_state *parent = dst->parent;
- enum bpf_reg_liveness live = dst->live;
-
*dst = *src;
- dst->parent = parent;
- dst->live = live;
}
static void save_register_state(struct bpf_verifier_env *env,
@@ -5081,8 +4985,6 @@ static void save_register_state(struct bpf_verifier_env *env,
int i;
copy_register_state(&state->stack[spi].spilled_ptr, reg);
- if (size == BPF_REG_SIZE)
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
state->stack[spi].slot_type[i - 1] = STACK_SPILL;
@@ -5231,17 +5133,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
for (i = 0; i < BPF_REG_SIZE; i++)
scrub_spilled_slot(&state->stack[spi].slot_type[i]);
- /* only mark the slot as written if all 8 bytes were written
- * otherwise read propagation may incorrectly stop too soon
- * when stack slots are partially written.
- * This heuristic means that read propagation will be
- * conservative, since it will add reg_live_read marks
- * to stack slots all the way to first state when programs
- * writes+reads less than 8 bytes
- */
- if (size == BPF_REG_SIZE)
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
-
/* when we zero initialize stack slots mark them as such */
if ((reg && register_is_null(reg)) ||
(!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
@@ -5434,7 +5325,6 @@ static void mark_reg_stack_read(struct bpf_verifier_env *env,
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, dst_regno);
}
- state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
}
/* Read the stack at 'off' and put the results into the register indicated by
@@ -5481,7 +5371,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
return -EACCES;
}
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
if (dst_regno < 0)
return 0;
@@ -5535,7 +5424,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
insn_flags = 0; /* not restoring original register state */
}
}
- state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
} else if (dst_regno >= 0) {
/* restore register state from stack */
copy_register_state(&state->regs[dst_regno], reg);
@@ -5543,7 +5431,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
- state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
/* If dst_regno==-1, the caller is asking us whether
* it is acceptable to use this value as a SCALAR_VALUE
@@ -5555,7 +5442,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
off);
return -EACCES;
}
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
for (i = 0; i < size; i++) {
type = stype[(slot - i) % BPF_REG_SIZE];
@@ -5569,7 +5455,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
off, i, size);
return -EACCES;
}
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
if (dst_regno >= 0)
mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
insn_flags = 0; /* we are not restoring spilled register */
@@ -8197,13 +8082,10 @@ mark:
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
- mark_reg_read(env, &state->stack[spi].spilled_ptr,
- state->stack[spi].spilled_ptr.parent,
- REG_LIVE_READ64);
err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi));
if (err)
return err;
- /* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
+ /* We do not call bpf_mark_stack_write(), as we can not
* be sure that whether stack slot is written to or not. Hence,
* we must still conservatively propagate reads upwards even if
* helper may write to the entire memory range.
@@ -11041,8 +10923,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
}
/* we are going to rely on register's precise value */
- err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
- err = err ?: mark_chain_precision(env, BPF_REG_0);
+ err = mark_chain_precision(env, BPF_REG_0);
if (err)
return err;
@@ -11946,17 +11827,11 @@ static void __mark_btf_func_reg_size(struct bpf_verifier_env *env, struct bpf_re
if (regno == BPF_REG_0) {
/* Function return value */
- reg->live |= REG_LIVE_WRITTEN;
reg->subreg_def = reg_size == sizeof(u64) ?
DEF_NOT_SUBREG : env->insn_idx + 1;
- } else {
+ } else if (reg_size == sizeof(u64)) {
/* Function argument */
- if (reg_size == sizeof(u64)) {
- mark_insn_zext(env, reg);
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
- } else {
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
- }
+ mark_insn_zext(env, reg);
}
}
@@ -15710,7 +15585,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
*/
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
- dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
} else {
/* case: R1 = (s8, s16 s32)R2 */
@@ -15729,7 +15603,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (!no_sext)
dst_reg->id = 0;
coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
- dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
} else {
mark_reg_unknown(env, regs, insn->dst_reg);
@@ -15755,7 +15628,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
*/
if (!is_src_reg_u32)
dst_reg->id = 0;
- dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = env->insn_idx + 1;
} else {
/* case: W1 = (s8, s16)W2 */
@@ -15766,7 +15638,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
- dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = env->insn_idx + 1;
coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
}
@@ -18576,11 +18447,6 @@ static void clean_func_state(struct bpf_verifier_env *env,
for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
if (!bpf_stack_slot_alive(env, st->frameno, i)) {
- if (st->stack[i].spilled_ptr.live & REG_LIVE_READ) {
- verifier_bug(env, "incorrect live marks #1 for insn %d frameno %d spi %d\n",
- env->insn_idx, st->frameno, i);
- env->internal_error = true;
- }
__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
st->stack[i].slot_type[j] = STACK_INVALID;
@@ -18609,25 +18475,23 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
* but a lot of states will get revised from liveness point of view when
* the verifier explores other branches.
* Example:
- * 1: r0 = 1
+ * 1: *(u64)(r10 - 8) = 1
* 2: if r1 == 100 goto pc+1
- * 3: r0 = 2
- * 4: exit
- * when the verifier reaches exit insn the register r0 in the state list of
- * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
- * of insn 2 and goes exploring further. At the insn 4 it will walk the
- * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
+ * 3: *(u64)(r10 - 8) = 2
+ * 4: r0 = *(u64)(r10 - 8)
+ * 5: exit
+ * when the verifier reaches exit insn the stack slot -8 in the state list of
+ * insn 2 is not yet marked alive. Then the verifier pops the other_branch
+ * of insn 2 and goes exploring further. After the insn 4 read, liveness
+ * analysis would propagate read mark for -8 at insn 2.
*
* Since the verifier pushes the branch states as it sees them while exploring
* the program the condition of walking the branch instruction for the second
* time means that all states below this branch were already explored and
* their final liveness marks are already propagated.
* Hence when the verifier completes the search of state list in is_state_visited()
- * we can call this clean_live_states() function to mark all liveness states
- * as st->cleaned to indicate that 'parent' pointers of 'struct bpf_reg_state'
- * will not be used.
- * This function also clears the registers and stack for states that !READ
- * to simplify state merging.
- * we can call this clean_live_states() function to clear the dead registers and stack
- * slots to simplify state merging.
*
* Important note here that walking the same branch instruction in the callee
* doesn't meant that the states are DONE. The verifier has to compare
@@ -18802,7 +18666,6 @@ static struct bpf_reg_state unbound_reg;
static __init int unbound_reg_init(void)
{
__mark_reg_unknown_imprecise(&unbound_reg);
- unbound_reg.live |= REG_LIVE_READ;
return 0;
}
late_initcall(unbound_reg_init);
@@ -19097,91 +18960,6 @@ static bool states_equal(struct bpf_verifier_env *env,
return true;
}
-/* Return 0 if no propagation happened. Return negative error code if error
- * happened. Otherwise, return the propagated bit.
- */
-static int propagate_liveness_reg(struct bpf_verifier_env *env,
- struct bpf_reg_state *reg,
- struct bpf_reg_state *parent_reg)
-{
- u8 parent_flag = parent_reg->live & REG_LIVE_READ;
- u8 flag = reg->live & REG_LIVE_READ;
- int err;
-
- /* When comes here, read flags of PARENT_REG or REG could be any of
- * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
- * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
- */
- if (parent_flag == REG_LIVE_READ64 ||
- /* Or if there is no read flag from REG. */
- !flag ||
- /* Or if the read flag from REG is the same as PARENT_REG. */
- parent_flag == flag)
- return 0;
-
- err = mark_reg_read(env, reg, parent_reg, flag);
- if (err)
- return err;
-
- return flag;
-}
-
-/* A write screens off any subsequent reads; but write marks come from the
- * straight-line code between a state and its parent. When we arrive at an
- * equivalent state (jump target or such) we didn't arrive by the straight-line
- * code, so read marks in the state must propagate to the parent regardless
- * of the state's write marks. That's what 'parent == state->parent' comparison
- * in mark_reg_read() is for.
- */
-static int propagate_liveness(struct bpf_verifier_env *env,
- const struct bpf_verifier_state *vstate,
- struct bpf_verifier_state *vparent,
- bool *changed)
-{
- struct bpf_reg_state *state_reg, *parent_reg;
- struct bpf_func_state *state, *parent;
- int i, frame, err = 0;
- bool tmp = false;
-
- changed = changed ?: &tmp;
- if (vparent->curframe != vstate->curframe) {
- WARN(1, "propagate_live: parent frame %d current frame %d\n",
- vparent->curframe, vstate->curframe);
- return -EFAULT;
- }
- /* Propagate read liveness of registers... */
- BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
- for (frame = 0; frame <= vstate->curframe; frame++) {
- parent = vparent->frame[frame];
- state = vstate->frame[frame];
- parent_reg = parent->regs;
- state_reg = state->regs;
- /* We don't need to worry about FP liveness, it's read-only */
- for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
- err = propagate_liveness_reg(env, &state_reg[i],
- &parent_reg[i]);
- if (err < 0)
- return err;
- *changed |= err > 0;
- if (err == REG_LIVE_READ64)
- mark_insn_zext(env, &parent_reg[i]);
- }
-
- /* Propagate stack slots. */
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
- i < parent->allocated_stack / BPF_REG_SIZE; i++) {
- parent_reg = &parent->stack[i].spilled_ptr;
- state_reg = &state->stack[i].spilled_ptr;
- err = propagate_liveness_reg(env, state_reg,
- parent_reg);
- *changed |= err > 0;
- if (err < 0)
- return err;
- }
- }
- return 0;
-}
-
/* find precise scalars in the previous equivalent state and
* propagate them into the current state
*/
@@ -19201,8 +18979,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
first = true;
for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
if (state_reg->type != SCALAR_VALUE ||
- !state_reg->precise ||
- !(state_reg->live & REG_LIVE_READ))
+ !state_reg->precise)
continue;
if (env->log.level & BPF_LOG_LEVEL2) {
if (first)
@@ -19219,8 +18996,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
continue;
state_reg = &state->stack[i].spilled_ptr;
if (state_reg->type != SCALAR_VALUE ||
- !state_reg->precise ||
- !(state_reg->live & REG_LIVE_READ))
+ !state_reg->precise)
continue;
if (env->log.level & BPF_LOG_LEVEL2) {
if (first)
@@ -19270,9 +19046,6 @@ static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visi
changed = false;
for (backedge = visit->backedges; backedge; backedge = backedge->next) {
st = &backedge->state;
- err = propagate_liveness(env, st->equal_state, st, &changed);
- if (err)
- return err;
err = propagate_precision(env, st->equal_state, st, &changed);
if (err)
return err;
@@ -19296,7 +19069,7 @@ static bool states_maybe_looping(struct bpf_verifier_state *old,
fcur = cur->frame[fr];
for (i = 0; i < MAX_BPF_REG; i++)
if (memcmp(&fold->regs[i], &fcur->regs[i],
- offsetof(struct bpf_reg_state, parent)))
+ offsetof(struct bpf_reg_state, frameno)))
return false;
return true;
}
@@ -19394,7 +19167,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
bool force_new_state, add_new_state, loop;
- int i, j, n, err, states_cnt = 0;
+ int n, err, states_cnt = 0;
struct list_head *pos, *tmp, *head;
force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) ||
@@ -19551,28 +19324,16 @@ skip_inf_loop_check:
loop = incomplete_read_marks(env, &sl->state);
if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
hit:
- if (env->internal_error)
- return -EFAULT;
sl->hit_cnt++;
- /* reached equivalent register/stack state,
- * prune the search.
- * Registers read by the continuation are read by us.
- * If we have any write marks in env->cur_state, they
- * will prevent corresponding reads in the continuation
- * from reaching our parent (an explored_state). Our
- * own state will get the read marks recorded, but
- * they'll be immediately forgotten as we're pruning
- * this state and will pop a new one.
- */
- err = propagate_liveness(env, &sl->state, cur, NULL);
/* if previous state reached the exit with precision and
* current state is equivalent to it (except precision marks)
* the precision needs to be propagated back in
* the current state.
*/
+ err = 0;
if (is_jmp_point(env, env->insn_idx))
- err = err ? : push_jmp_history(env, cur, 0, 0);
+ err = push_jmp_history(env, cur, 0, 0);
err = err ? : propagate_precision(env, &sl->state, cur, NULL);
if (err)
return err;
@@ -19667,8 +19428,6 @@ hit:
return 1;
}
miss:
- if (env->internal_error)
- return -EFAULT;
/* when new state is not going to be added do not increase miss count.
* Otherwise several loop iterations will remove the state
* recorded earlier. The goal of these heuristics is to have
@@ -19754,38 +19513,6 @@ miss:
cur->dfs_depth = new->dfs_depth + 1;
clear_jmp_history(cur);
list_add(&new_sl->node, head);
-
- /* connect new state to parentage chain. Current frame needs all
- * registers connected. Only r6 - r9 of the callers are alive (pushed
- * to the stack implicitly by JITs) so in callers' frames connect just
- * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
- * the state of the call instruction (with WRITTEN set), and r0 comes
- * from callee with its full parentage chain, anyway.
- */
- /* clear write marks in current state: the writes we did are not writes
- * our child did, so they don't screen off its reads from us.
- * (There are no read marks in current state, because reads always mark
- * their parent and current state never has children yet. Only
- * explored_states can get read marks.)
- */
- for (j = 0; j <= cur->curframe; j++) {
- for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
- cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
- for (i = 0; i < BPF_REG_FP; i++)
- cur->frame[j]->regs[i].live = REG_LIVE_NONE;
- }
-
- /* all stack frames are accessible from callee, clear them all */
- for (j = 0; j <= cur->curframe; j++) {
- struct bpf_func_state *frame = cur->frame[j];
- struct bpf_func_state *newframe = new->frame[j];
-
- for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
- frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
- frame->stack[i].spilled_ptr.parent =
- &newframe->stack[i].spilled_ptr;
- }
- }
return 0;
}