author     Alexei Starovoitov <ast@kernel.org>    2026-02-03 21:51:46 -0800
committer  Andrii Nakryiko <andrii@kernel.org>    2026-02-04 13:12:50 -0800
commit     64873307e888505ccc45ef049dccdcfef42d2f54 (patch)
tree       4a128d5e06280f68586417c6690c980dd26056af
parent     67ee5ad27d5101be4e9e8980c0734a0423bfd0a7 (diff)
bpf: Add a recursion check to prevent loops in bpf_timer
Do not schedule a timer/wq operation on a cpu that is in an irq_work
callback processing the async_cmds queue. Otherwise the following loop
is possible:

  bpf_timer_start() -> bpf_async_schedule_op() -> irq_work_queue().
  irqrestore -> bpf_async_irq_worker() -> tracepoint -> bpf_timer_start().

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260204055147.54960-4-alexei.starovoitov@gmail.com
-rw-r--r--    kernel/bpf/helpers.c    16
1 file changed, 16 insertions(+), 0 deletions(-)
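The patch below follows a common recursion-guard pattern: a per-CPU marker is set for the
duration of the queue drain, and any attempt to schedule work for the same object from
inside that drain is refused. As a rough illustration only (not the kernel code), here is
a minimal user-space sketch of the same idea; a __thread pointer stands in for the
per-CPU async_cb_running variable, and queue_op()/drain_queue() are hypothetical
stand-ins for bpf_async_schedule_op()/bpf_async_irq_worker().

#include <errno.h>
#include <stdio.h>

static __thread const void *cb_running; /* per-thread here, per-CPU in the kernel */

/* Stand-in for bpf_async_schedule_op(): refuse to queue more work for the
 * object this thread is currently draining, breaking the potential loop. */
static int queue_op(const void *cb)
{
        if (cb_running == cb)
                return -EDEADLK;
        printf("queued op for %p\n", (void *)cb);
        return 0;
}

/* Stand-in for bpf_async_irq_worker(): mark the object as being drained so
 * that re-entrant callers (e.g. a hook fired while draining) bail out. */
static void drain_queue(const void *cb)
{
        cb_running = cb;
        /* ... process queued commands; a hook here may call queue_op() again ... */
        printf("re-entrant queue_op() -> %d\n", queue_op(cb));
        cb_running = NULL;
}

int main(void)
{
        int obj;

        printf("direct queue_op() -> %d\n", queue_op(&obj)); /* succeeds: 0 */
        drain_queue(&obj);                                   /* re-entry refused: -EDEADLK */
        return 0;
}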
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 0517e9a8fc7c..01052f8664eb 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1427,9 +1427,23 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
        return 0;
}
+static DEFINE_PER_CPU(struct bpf_async_cb *, async_cb_running);
+
static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
                                 u64 nsec, u32 timer_mode)
{
+       /*
+        * Do not schedule another operation on this cpu if it's in an irq_work
+        * callback that is processing the async_cmds queue. Otherwise the
+        * following loop is possible:
+        * bpf_timer_start() -> bpf_async_schedule_op() -> irq_work_queue().
+        * irqrestore -> bpf_async_irq_worker() -> tracepoint -> bpf_timer_start().
+        */
+       if (this_cpu_read(async_cb_running) == cb) {
+               bpf_async_refcount_put(cb);
+               return -EDEADLK;
+       }
+
        struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);
        if (!cmd) {
@@ -1628,6 +1642,7 @@ static void bpf_async_irq_worker(struct irq_work *work)
                return;
        list = llist_reverse_order(list);
+       this_cpu_write(async_cb_running, cb);
        llist_for_each_safe(pos, n, list) {
                struct bpf_async_cmd *cmd;
@@ -1635,6 +1650,7 @@ static void bpf_async_irq_worker(struct irq_work *work)
                bpf_async_process_op(cb, cmd->op, cmd->nsec, cmd->mode);
                kfree_nolock(cmd);
        }
+       this_cpu_write(async_cb_running, NULL);
}
static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
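
With this change, bpf_timer_start() (and the equivalent wq scheduling path) can fail with
-EDEADLK when called from a context that is already draining the same object's async_cmds
queue on this CPU. The following is a hedged sketch of how a BPF program might tolerate
that return value; the map, section, and program names are illustrative assumptions, not
taken from the patch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define EDEADLK 35 /* value from <asm-generic/errno.h>, defined here to stay self-contained */

struct elem {
        struct bpf_timer t;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
        return 0;
}

SEC("tp/sched/sched_switch")
int rearm_timer(void *ctx)
{
        int key = 0;
        struct elem *e;
        long err;

        e = bpf_map_lookup_elem(&timers, &key);
        if (!e)
                return 0;

        /* Re-init after the first call returns -EBUSY; harmless for this sketch. */
        bpf_timer_init(&e->t, &timers, 1 /* CLOCK_MONOTONIC */);
        bpf_timer_set_callback(&e->t, timer_cb);

        err = bpf_timer_start(&e->t, 1000000 /* 1 ms */, 0);
        if (err == -EDEADLK) {
                /* This CPU is already draining this timer's async queue
                 * (e.g. this tracepoint fired from inside the irq_work
                 * worker); dropping the request here is what breaks the loop. */
                return 0;
        }
        return 0;
}

char LICENSE[] SEC("license") = "GPL";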