author    Alexei Starovoitov <ast@kernel.org>    2025-10-10 10:04:51 -0700
committer Alexei Starovoitov <ast@kernel.org>    2025-10-10 10:04:52 -0700
commit    17566cf0e3629728b692e35213a4fd6ea9f86150 (patch)
tree      d06ab313435df1dd7fa7e4ad36bf57815e7a06e3 /kernel
parent    56b4d162392dda2365fbc1f482184a24b489d07d (diff)
parent    5b1b5d380ac7de39e9cb9de4209719b3949ebd3c (diff)
Merge branch 'fix-sleepable-context-tracking-for-async-callbacks'
Kumar Kartikeya Dwivedi says:

====================
Fix sleepable context tracking for async callbacks

Currently, asynchronous execution primitives set up their callback
execution simulation using push_async_cb, which will end up inheriting
the sleepable or non-sleepable bit from the program triggering the
simulation of the callback. This is incorrect, as the actual execution
context of the asynchronous callback has nothing to do with the program
arming its execution. This set fixes this oversight, and supplies a few
test cases ensuring the correct behavior is tested across different
types of primitives (i.e. timer, wq, task_work).

While looking at this bug, it was noticed that the GFP flag setting
logic for storage_get helpers is also broken, hence fix it while we are
at it.

PSA: These fixes and unit tests were primarily produced by prompting an
AI assistant (Claude), and then modified in minor ways, in an exercise
to understand how useful it can be at general kernel development tasks.

Changelog:
----------
v1 -> v2
v1: https://lore.kernel.org/bpf/20251007014310.2889183-1-memxor@gmail.com
  * Squash fix for GFP flags into 1st commit. (Eduard)
  * Add a commit refactoring func_atomic to non_sleepable, make it
    generic, also set for kfuncs in addition to helpers. (Eduard)
  * Leave selftest as-is, coverage for global subprogs calling sleepable
    kfuncs or helpers is provided in rcu_read_lock.c.
====================

Link: https://patch.msgid.link/20251007220349.3852807-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
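To make the oversight concrete, here is a minimal selftest-style sketch of
the timer case (this is not one of the selftests from the series; names
like timer_map, timer_cb and arm_timer are made up for illustration). The
arming program is sleepable, but the timer callback runs from the timer
softirq; before this fix the callback inherited the sleepable bit from the
arming program, so the sleepable-only bpf_copy_from_user() call below was
wrongly accepted. After the fix, the verifier must reject it.

/* Hypothetical sketch: sleepable LSM program arming a non-sleepable
 * bpf_timer callback. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define CLOCK_MONOTONIC 1

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	char buf[8];

	/* Timer callbacks run in softirq context and may never sleep:
	 * this sleepable-only helper call must fail verification. */
	bpf_copy_from_user(buf, sizeof(buf), NULL);
	return 0;
}

SEC("lsm.s/file_open") /* sleepable program arms the timer */
int BPF_PROG(arm_timer, struct file *file)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

	if (!e)
		return 0;
	bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->t, timer_cb);
	bpf_timer_start(&e->t, 0, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";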
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/bpf/verifier.c    54
1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ff40e5e65c43..85a953124412 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -515,6 +515,7 @@ static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+static bool is_task_work_add_kfunc(u32 func_id);
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{
@@ -547,6 +548,21 @@ static bool is_async_callback_calling_insn(struct bpf_insn *insn)
(bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
}
+static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
+{
+ /* bpf_timer callbacks are never sleepable. */
+ if (bpf_helper_call(insn) && insn->imm == BPF_FUNC_timer_set_callback)
+ return false;
+
+ /* bpf_wq and bpf_task_work callbacks are always sleepable. */
+ if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
+ (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+ return true;
+
+ verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
+ return false;
+}
+
static bool is_may_goto_insn(struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
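The new is_async_cb_sleepable() above encodes the actual execution context
per primitive. The always-sleepable side can be sketched as follows (again
hypothetical, not a selftest from this series: the bpf_wq wrappers are
assumed to come from the selftests' bpf_experimental.h, and
bpf_copy_from_user_str() is used as an example of a KF_SLEEPABLE kfunc,
declared here for self-containedness). The arming program is non-sleepable,
yet the bpf_wq callback runs from a workqueue in process context, so the
sleepable kfunc call below is legal once the callback's sleepability is
tracked correctly:

/* Hypothetical sketch: non-sleepable program arming an always-sleepable
 * bpf_wq callback. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

extern int bpf_copy_from_user_str(void *dst, u32 dst__sz,
				  const void *unsafe_ptr__ign,
				  u64 flags) __ksym;

struct elem {
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} wq_map SEC(".maps");

static int wq_cb(void *map, int *key, void *value)
{
	char buf[8];

	/* Workqueue (process) context: sleeping is allowed regardless
	 * of the sleepability of the program that armed us. */
	bpf_copy_from_user_str(buf, sizeof(buf), NULL, 0);
	return 0;
}

SEC("tc") /* non-sleepable program arms a sleepable callback */
int arm_wq(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&wq_map, &key);

	if (!e)
		return 0;
	bpf_wq_init(&e->w, &wq_map, 0);
	bpf_wq_set_callback(&e->w, wq_cb, 0);
	bpf_wq_start(&e->w, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

Before the fix, the true branch of is_async_cb_sleepable() was the only
case handled at the push_async_cb() call site, and timer callbacks fell
through to inheriting the arming program's sleepable bit.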
@@ -5826,8 +5842,7 @@ bad_type:
static bool in_sleepable(struct bpf_verifier_env *env)
{
- return env->prog->sleepable ||
- (env->cur_state && env->cur_state->in_sleepable);
+ return env->cur_state->in_sleepable;
}
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -10366,8 +10381,6 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
struct bpf_func_state *callee,
int insn_idx);
-static bool is_task_work_add_kfunc(u32 func_id);
-
static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
@@ -10586,8 +10599,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
env->subprog_info[subprog].is_async_cb = true;
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
insn_idx, subprog,
- is_bpf_wq_set_callback_impl_kfunc(insn->imm) ||
- is_task_work_add_kfunc(insn->imm));
+ is_async_cb_sleepable(env, insn));
if (!async_cb)
return -EFAULT;
callee = async_cb->frame[0];
@@ -11359,6 +11371,15 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
return *ptr && (*ptr)->func ? 0 : -EINVAL;
}
+/* Check if we're in a sleepable context. */
+static inline bool in_sleepable_context(struct bpf_verifier_env *env)
+{
+ return !env->cur_state->active_rcu_lock &&
+ !env->cur_state->active_preempt_locks &&
+ !env->cur_state->active_irq_id &&
+ in_sleepable(env);
+}
+
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
@@ -11425,9 +11446,6 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
func_id_name(func_id), func_id);
return -EINVAL;
}
-
- if (in_sleepable(env) && is_storage_get_function(func_id))
- env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
if (env->cur_state->active_preempt_locks) {
@@ -11436,9 +11454,6 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
func_id_name(func_id), func_id);
return -EINVAL;
}
-
- if (in_sleepable(env) && is_storage_get_function(func_id))
- env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
if (env->cur_state->active_irq_id) {
@@ -11447,11 +11462,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
func_id_name(func_id), func_id);
return -EINVAL;
}
-
- if (in_sleepable(env) && is_storage_get_function(func_id))
- env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
+ /* Track non-sleepable context for helpers. */
+ if (!in_sleepable_context(env))
+ env->insn_aux_data[insn_idx].non_sleepable = true;
+
meta.func_id = func_id;
/* check args */
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
@@ -13861,6 +13877,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EACCES;
}
+ /* Track non-sleepable context for kfuncs, same as for helpers. */
+ if (!in_sleepable_context(env))
+ insn_aux->non_sleepable = true;
+
/* Check the arguments */
err = check_kfunc_args(env, &meta, insn_idx);
if (err < 0)
@@ -22483,8 +22503,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}
if (is_storage_get_function(insn->imm)) {
- if (!in_sleepable(env) ||
- env->insn_aux_data[i + delta].storage_get_func_atomic)
+ if (env->insn_aux_data[i + delta].non_sleepable)
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
else
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
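The net effect of this hunk, sketched as a hypothetical sleepable program
(names are made up; bpf_rcu_read_lock()/bpf_rcu_read_unlock() are the
existing RCU kfuncs, declared here for self-containedness): the same
storage_get helper is now patched with GFP_KERNEL or GFP_ATOMIC purely
based on whether the call site sits in a sleepable context, via the
generic non_sleepable flag instead of the old storage_get_func_atomic
sprinkled into each lock check.

/* Hypothetical sketch: one sleepable program, two GFP outcomes. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} task_map SEC(".maps");

SEC("lsm.s/file_open")
int BPF_PROG(probe, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *v;

	/* Sleepable context: fixup passes GFP_KERNEL to the helper. */
	v = bpf_task_storage_get(&task_map, task, NULL,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		*v += 1;

	bpf_rcu_read_lock();
	/* Inside the RCU read section sleeping is forbidden, so the
	 * verifier sets non_sleepable for this call and the fixup
	 * passes GFP_ATOMIC instead. */
	v = bpf_task_storage_get(&task_map, task, NULL,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		*v += 1;
	bpf_rcu_read_unlock();
	return 0;
}

char _license[] SEC("license") = "GPL";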
@@ -23154,6 +23173,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
state->curframe = 0;
state->speculative = false;
state->branches = 1;
+ state->in_sleepable = env->prog->sleepable;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
if (!state->frame[0]) {
kfree(state);