summary | refs | log | tree | commit | diff
path: root/kernel
diff options
context:
space:
mode:
authorMykyta Yatsenko <yatsenko@meta.com>2026-01-20 15:59:13 +0000
committerAlexei Starovoitov <ast@kernel.org>2026-01-20 18:12:19 -0800
commit83c9030cdc45e0518d71065c25201a24eafc9818 (patch)
tree0afd43321adf38d2a220f6b902f06eab2b39c62e /kernel
parent8bb1e32b3fac1becb4c1c8079d720784b8e33e34 (diff)
bpf: Simplify bpf_timer_cancel()
Remove lock from the bpf_timer_cancel() helper. The lock does not protect from concurrent modification of the bpf_async_cb data fields, as those are modified in the callback without locking. Use guard(rcu)() instead of a pair of explicit rcu_read_lock()/rcu_read_unlock() calls.

Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20260120-timer_nolock-v6-4-670ffdd787b4@meta.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/bpf/helpers.c27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 2a2df867bfe7..637677815365 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1471,7 +1471,7 @@ static const struct bpf_func_proto bpf_timer_start_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
+BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
{
struct bpf_hrtimer *t, *cur_t;
bool inc = false;
@@ -1479,13 +1479,12 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
if (in_nmi())
return -EOPNOTSUPP;
- rcu_read_lock();
- __bpf_spin_lock_irqsave(&timer->lock);
- t = timer->timer;
- if (!t) {
- ret = -EINVAL;
- goto out;
- }
+
+ guard(rcu)();
+
+ t = READ_ONCE(async->timer);
+ if (!t)
+ return -EINVAL;
cur_t = this_cpu_read(hrtimer_running);
if (cur_t == t) {
@@ -1493,8 +1492,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
* its own timer the hrtimer_cancel() will deadlock
* since it waits for callback_fn to finish.
*/
- ret = -EDEADLK;
- goto out;
+ return -EDEADLK;
}
/* Only account in-flight cancellations when invoked from a timer
@@ -1517,20 +1515,17 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
* cancelling and waiting for it synchronously, since it might
* do the same. Bail!
*/
- ret = -EDEADLK;
- goto out;
+ atomic_dec(&t->cancelling);
+ return -EDEADLK;
}
drop:
bpf_async_update_prog_callback(&t->cb, NULL, NULL);
-out:
- __bpf_spin_unlock_irqrestore(&timer->lock);
/* Cancel the timer and wait for associated callback to finish
* if it was running.
*/
- ret = ret ?: hrtimer_cancel(&t->timer);
+ ret = hrtimer_cancel(&t->timer);
if (inc)
atomic_dec(&t->cancelling);
- rcu_read_unlock();
return ret;
}