| author | Andrii Nakryiko <andrii@kernel.org> | 2026-02-03 16:58:48 -0800 |
|---|---|---|
| committer | Andrii Nakryiko <andrii@kernel.org> | 2026-02-03 16:58:48 -0800 |
| commit | b28dac3fc99bb6f1d0b029f1b742a96e7bc797d6 (patch) | |
| tree | c0bc2b18acde0d3f1fab17c59c049df7771b9620 /kernel | |
| parent | f11f7cf90ee09dbcf76413818063ffc38ed2d9fe (diff) | |
| parent | b135beb07758a854160e5421b6f4d5bde72e0da6 (diff) | |
Merge branch 'bpf-avoid-locks-in-bpf_timer-and-bpf_wq'
Alexei Starovoitov says:
====================
bpf: Avoid locks in bpf_timer and bpf_wq
From: Alexei Starovoitov <ast@kernel.org>
This series reworks implementation of BPF timer and workqueue APIs to
make them usable from any context.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Changes in v9:
- Different approach for patches 1 and 3:
- s/EBUSY/ENOENT/ when refcnt==0 to match existing
- drop latch, use refcnt and kmalloc_nolock() instead
- address race between timer/wq_start and delete_elem, add a test
- Link to v8: https://lore.kernel.org/bpf/20260127-timer_nolock-v8-0-5a29a9571059@meta.com/
Changes in v8:
- Return -EBUSY in bpf_async_read_op() if last_seq fails to be set
- In bpf_async_cancel_and_free() drop bpf_async_cb ref after calling bpf_async_process()
- Link to v7: https://lore.kernel.org/r/20260122-timer_nolock-v7-0-04a45c55c2e2@meta.com
Changes in v7:
- Addressed Andrii's review points from the previous version - nothing
very significant.
- Added NMI stress tests for bpf_timer - hit a few failing verifier checks
and removed them.
- Addressed a sparse warning in bpf_async_update_prog_callback()
- Link to v6: https://lore.kernel.org/r/20260120-timer_nolock-v6-0-670ffdd787b4@meta.com
Changes in v6:
- Reworked destruction and refcnt use:
- On cancel_and_free() set last_seq to BPF_ASYNC_DESTROY value, drop
map's reference
- In irq work callback, atomically switch DESTROY to DESTROYED, cancel
timer/wq
- Free bpf_async_cb on refcnt going to 0.
- Link to v5: https://lore.kernel.org/r/20260115-timer_nolock-v5-0-15e3aef2703d@meta.com
Changes in v5:
- Extracted the lock-free algorithm for updating cb->prog and
cb->callback_fn into a function, bpf_async_update_prog_callback(),
and added a new commit that introduces this function and uses it in
__bpf_async_set_callback(), bpf_timer_cancel() and
bpf_async_cancel_and_free().
This allows moving the change into a separate commit without breaking
correctness.
- Handle NULL prog in bpf_async_update_prog_callback().
- Link to v4: https://lore.kernel.org/r/20260114-timer_nolock-v4-0-fa6355f51fa7@meta.com
Changes in v4:
- Handle irq_work_queue failures in both schedule and cancel_and_free
paths: introduced bpf_async_refcnt_dec_cleanup(), which decrements refcnt
and makes sure that, if the last reference is put, at least one irq_work
is scheduled to execute the final cleanup.
- Additional refcnt inc/dec in set_callback() + rcu lock to make sure
cleanup is not running at the same time as set_callback().
- Added READ_ONCE where it was needed.
- Squash 'bpf: Refactor __bpf_async_set_callback()' commit into 'bpf:
Add lock-free cell for NMI-safe
async operations'
- Removed mpmc_cell, use seqcount_latch_t instead.
- Link to v3: https://lore.kernel.org/r/20260107-timer_nolock-v3-0-740d3ec3e5f9@meta.com
Changes in v3:
- Major rework
- Introduce mpmc_cell, allowing concurrent writes and reads
- Implement irq_work deferring
- Add selftests
- Introduce bpf_timer_cancel_async kfunc
- Link to v2: https://lore.kernel.org/r/20251105-timer_nolock-v2-0-32698db08bfa@meta.com
Changes in v2:
- Move refcnt initialization and put (from cancel_and_free())
from patch 5 into patch 4, so that patch 4 has a clearer and more complete
implementation and use of refcnt
- Link to v1: https://lore.kernel.org/r/20251031-timer_nolock-v1-0-b064ae403bfb@meta.com
====================
Link: https://patch.msgid.link/20260201025403.66625-1-alexei.starovoitov@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
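
The core technique the series describes - deferring timer/workqueue operations through a lock-free llist that is drained from irq_work whenever the caller is in hard-IRQ/NMI context - can be sketched outside the diff as follows. This is a simplified illustration, not the kernel code itself: the names (async_cb, async_cmd, async_schedule_op, async_irq_worker) are invented for the sketch, and plain kmalloc()/kfree() stand in for the kmalloc_nolock()/kfree_nolock() calls the series relies on for NMI safety.

```c
#include <linux/container_of.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* One deferred operation; mirrors the shape of struct bpf_async_cmd in the diff. */
struct async_cmd {
	struct llist_node node;
	u64 nsec;
	u32 mode;
	int op;
};

/* Per-object state: a lock-free command list plus the irq_work that drains it. */
struct async_cb {
	struct llist_head cmds;
	struct irq_work worker;
};

static void async_irq_worker(struct irq_work *work)
{
	struct async_cb *cb = container_of(work, struct async_cb, worker);
	struct llist_node *pos, *n, *list;

	/* Grab everything queued so far and replay it in FIFO order. */
	list = llist_reverse_order(llist_del_all(&cb->cmds));
	llist_for_each_safe(pos, n, list) {
		struct async_cmd *cmd = container_of(pos, struct async_cmd, node);

		/* ... hrtimer_start()/hrtimer_try_to_cancel()/schedule_work() ... */
		kfree(cmd);	/* the series uses kfree_nolock() here */
	}
}

static void async_cb_init(struct async_cb *cb)
{
	init_llist_head(&cb->cmds);
	init_irq_work(&cb->worker, async_irq_worker);
}

/* Called from hard-IRQ/NMI context instead of touching the hrtimer directly. */
static int async_schedule_op(struct async_cb *cb, int op, u64 nsec, u32 mode)
{
	/* kmalloc() keeps the sketch self-contained but is not NMI-safe,
	 * which is why the real code uses kmalloc_nolock().
	 */
	struct async_cmd *cmd = kmalloc(sizeof(*cmd), GFP_NOWAIT);

	if (!cmd)
		return -ENOMEM;
	cmd->op = op;
	cmd->nsec = nsec;
	cmd->mode = mode;
	/* llist_add() returns true only when the list was empty, so the
	 * irq_work is kicked once per batch of queued commands.
	 */
	if (llist_add(&cmd->node, &cb->cmds))
		irq_work_queue(&cb->worker);
	return 0;
}
```

Callers that are not in hard-IRQ context skip the queue entirely and arm or cancel the timer/workqueue directly, which is what bpf_timer_start() and bpf_wq_start() do in the diff below.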
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/bpf/helpers.c | 456 |
|---|---|---|
| -rw-r--r-- | kernel/bpf/verifier.c | 55 |
2 files changed, 310 insertions, 201 deletions
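
In the helpers.c diff below, the per-element bpf_spin_lock that used to serialize __bpf_async_init() is replaced by a single-publish cmpxchg(). A minimal sketch of that idiom, with hypothetical names (async_kern, async_cb, async_init_once) and plain kzalloc()/kfree() standing in for bpf_map_kmalloc_nolock()/kfree_nolock():

```c
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct async_cb {
	refcount_t refcnt;
	/* ... timer or workqueue state ... */
};

struct async_kern {
	struct async_cb *cb;	/* NULL until initialized, published below */
};

static int async_init_once(struct async_kern *async)
{
	struct async_cb *cb, *old;

	if (READ_ONCE(async->cb))
		return -EBUSY;			/* already initialized */

	cb = kzalloc(sizeof(*cb), GFP_NOWAIT);	/* real code: bpf_map_kmalloc_nolock() */
	if (!cb)
		return -ENOMEM;
	refcount_set(&cb->refcnt, 1);		/* reference owned by the map element */

	/* Publish atomically: exactly one initializer wins, losers free their copy. */
	old = cmpxchg(&async->cb, NULL, cb);
	if (old) {
		kfree(cb);			/* real code: kfree_nolock() */
		return -EBUSY;
	}
	return 0;
}
```

The reference taken here on behalf of the map element is the one bpf_async_cancel_and_free() eventually drops; the start paths only take short-lived references with refcount_inc_not_zero(), so a start racing with element deletion fails with -ENOENT instead of touching freed memory.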
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c30a9f68af6b..d4aedac14a60 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1095,16 +1095,34 @@ static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx) return (void *)value - round_up(map->key_size, 8); } +enum bpf_async_type { + BPF_ASYNC_TYPE_TIMER = 0, + BPF_ASYNC_TYPE_WQ, +}; + +enum bpf_async_op { + BPF_ASYNC_START, + BPF_ASYNC_CANCEL +}; + +struct bpf_async_cmd { + struct llist_node node; + u64 nsec; + u32 mode; + enum bpf_async_op op; +}; + struct bpf_async_cb { struct bpf_map *map; struct bpf_prog *prog; void __rcu *callback_fn; void *value; - union { - struct rcu_head rcu; - struct work_struct delete_work; - }; + struct rcu_head rcu; u64 flags; + struct irq_work worker; + refcount_t refcnt; + enum bpf_async_type type; + struct llist_head async_cmds; }; /* BPF map elements can contain 'struct bpf_timer'. @@ -1132,7 +1150,6 @@ struct bpf_hrtimer { struct bpf_work { struct bpf_async_cb cb; struct work_struct work; - struct work_struct delete_work; }; /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */ @@ -1142,20 +1159,12 @@ struct bpf_async_kern { struct bpf_hrtimer *timer; struct bpf_work *work; }; - /* bpf_spin_lock is used here instead of spinlock_t to make - * sure that it always fits into space reserved by struct bpf_timer - * regardless of LOCKDEP and spinlock debug flags. - */ - struct bpf_spin_lock lock; } __attribute__((aligned(8))); -enum bpf_async_type { - BPF_ASYNC_TYPE_TIMER = 0, - BPF_ASYNC_TYPE_WQ, -}; - static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); +static void bpf_async_refcount_put(struct bpf_async_cb *cb); + static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) { struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); @@ -1219,45 +1228,73 @@ static void bpf_async_cb_rcu_free(struct rcu_head *rcu) { struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu); + /* + * Drop the last reference to prog only after RCU GP, as set_callback() + * may race with cancel_and_free() + */ + if (cb->prog) + bpf_prog_put(cb->prog); + kfree_nolock(cb); } -static void bpf_wq_delete_work(struct work_struct *work) +/* Callback from call_rcu_tasks_trace, chains to call_rcu for final free */ +static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu) { - struct bpf_work *w = container_of(work, struct bpf_work, delete_work); + struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu); + struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb); + struct bpf_work *w = container_of(cb, struct bpf_work, cb); + bool retry = false; - cancel_work_sync(&w->work); + /* + * bpf_async_cancel_and_free() tried to cancel timer/wq, but it + * could have raced with timer/wq_start. Now refcnt is zero and + * srcu/rcu GP completed. Cancel timer/wq again. + */ + switch (cb->type) { + case BPF_ASYNC_TYPE_TIMER: + if (hrtimer_try_to_cancel(&t->timer) < 0) + retry = true; + break; + case BPF_ASYNC_TYPE_WQ: + if (!cancel_work(&w->work)) + retry = true; + break; + } + if (retry) { + /* + * hrtimer or wq callback may still be running. It must be + * in rcu_tasks_trace or rcu CS, so wait for GP again. + * It won't retry forever, since refcnt zero prevents all + * operations on timer/wq. 
+ */ + call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free); + return; + } - call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free); + /* rcu_trace_implies_rcu_gp() is true and will remain so */ + bpf_async_cb_rcu_free(rcu); } -static void bpf_timer_delete_work(struct work_struct *work) +static void bpf_async_refcount_put(struct bpf_async_cb *cb) { - struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work); + if (!refcount_dec_and_test(&cb->refcnt)) + return; - /* Cancel the timer and wait for callback to complete if it was running. - * If hrtimer_cancel() can be safely called it's safe to call - * call_rcu() right after for both preallocated and non-preallocated - * maps. The async->cb = NULL was already done and no code path can see - * address 't' anymore. Timer if armed for existing bpf_hrtimer before - * bpf_timer_cancel_and_free will have been cancelled. - */ - hrtimer_cancel(&t->timer); - call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free); + call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free); } +static void bpf_async_cancel_and_free(struct bpf_async_kern *async); +static void bpf_async_irq_worker(struct irq_work *work); + static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, enum bpf_async_type type) { - struct bpf_async_cb *cb; + struct bpf_async_cb *cb, *old_cb; struct bpf_hrtimer *t; struct bpf_work *w; clockid_t clockid; size_t size; - int ret = 0; - - if (in_nmi()) - return -EOPNOTSUPP; switch (type) { case BPF_ASYNC_TYPE_TIMER: @@ -1270,18 +1307,13 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u return -EINVAL; } - __bpf_spin_lock_irqsave(&async->lock); - t = async->timer; - if (t) { - ret = -EBUSY; - goto out; - } + old_cb = READ_ONCE(async->cb); + if (old_cb) + return -EBUSY; cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node); - if (!cb) { - ret = -ENOMEM; - goto out; - } + if (!cb) + return -ENOMEM; switch (type) { case BPF_ASYNC_TYPE_TIMER: @@ -1289,7 +1321,6 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u t = (struct bpf_hrtimer *)cb; atomic_set(&t->cancelling, 0); - INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT); cb->value = (void *)async - map->record->timer_off; break; @@ -1297,16 +1328,24 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u w = (struct bpf_work *)cb; INIT_WORK(&w->work, bpf_wq_work); - INIT_WORK(&w->delete_work, bpf_wq_delete_work); cb->value = (void *)async - map->record->wq_off; break; } cb->map = map; cb->prog = NULL; cb->flags = flags; + cb->worker = IRQ_WORK_INIT(bpf_async_irq_worker); + init_llist_head(&cb->async_cmds); + refcount_set(&cb->refcnt, 1); /* map's reference */ + cb->type = type; rcu_assign_pointer(cb->callback_fn, NULL); - WRITE_ONCE(async->cb, cb); + old_cb = cmpxchg(&async->cb, NULL, cb); + if (old_cb) { + /* Lost the race to initialize this bpf_async_kern, drop the allocated object */ + kfree_nolock(cb); + return -EBUSY; + } /* Guarantee the order between async->cb and map->usercnt. So * when there are concurrent uref release and bpf timer init, either * bpf_timer_cancel_and_free() called by uref release reads a no-NULL @@ -1317,13 +1356,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u /* maps with timers must be either held by user space * or pinned in bpffs. 
*/ - WRITE_ONCE(async->cb, NULL); - kfree_nolock(cb); - ret = -EPERM; + bpf_async_cancel_and_free(async); + return -EPERM; } -out: - __bpf_spin_unlock_irqrestore(&async->lock); - return ret; + + return 0; } BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map, @@ -1354,8 +1391,9 @@ static const struct bpf_func_proto bpf_timer_init_proto = { .arg3_type = ARG_ANYTHING, }; -static int bpf_async_update_prog_callback(struct bpf_async_cb *cb, void *callback_fn, - struct bpf_prog *prog) +static int bpf_async_update_prog_callback(struct bpf_async_cb *cb, + struct bpf_prog *prog, + void *callback_fn) { struct bpf_prog *prev; @@ -1380,7 +1418,8 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb, void *callbac if (prev) bpf_prog_put(prev); - } while (READ_ONCE(cb->prog) != prog || READ_ONCE(cb->callback_fn) != callback_fn); + } while (READ_ONCE(cb->prog) != prog || + (void __force *)READ_ONCE(cb->callback_fn) != callback_fn); if (prog) bpf_prog_put(prog); @@ -1388,33 +1427,36 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb, void *callbac return 0; } +static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op, + u64 nsec, u32 timer_mode) +{ + WARN_ON_ONCE(!in_hardirq()); + + struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE); + + if (!cmd) { + bpf_async_refcount_put(cb); + return -ENOMEM; + } + init_llist_node(&cmd->node); + cmd->nsec = nsec; + cmd->mode = timer_mode; + cmd->op = op; + if (llist_add(&cmd->node, &cb->async_cmds)) + irq_work_queue(&cb->worker); + return 0; +} + static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, struct bpf_prog *prog) { struct bpf_async_cb *cb; - int ret = 0; - if (in_nmi()) - return -EOPNOTSUPP; - __bpf_spin_lock_irqsave(&async->lock); - cb = async->cb; - if (!cb) { - ret = -EINVAL; - goto out; - } - if (!atomic64_read(&cb->map->usercnt)) { - /* maps with timers must be either held by user space - * or pinned in bpffs. Otherwise timer might still be - * running even when bpf prog is detached and user space - * is gone, since map_release_uref won't ever be called. 
- */ - ret = -EPERM; - goto out; - } - ret = bpf_async_update_prog_callback(cb, callback_fn, prog); -out: - __bpf_spin_unlock_irqrestore(&async->lock); - return ret; + cb = READ_ONCE(async->cb); + if (!cb) + return -EINVAL; + + return bpf_async_update_prog_callback(cb, prog, callback_fn); } BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn, @@ -1431,22 +1473,17 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = { .arg2_type = ARG_PTR_TO_FUNC, }; -BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags) +BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags) { struct bpf_hrtimer *t; - int ret = 0; - enum hrtimer_mode mode; + u32 mode; - if (in_nmi()) - return -EOPNOTSUPP; if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN)) return -EINVAL; - __bpf_spin_lock_irqsave(&timer->lock); - t = timer->timer; - if (!t || !t->cb.prog) { - ret = -EINVAL; - goto out; - } + + t = READ_ONCE(async->timer); + if (!t || !READ_ONCE(t->cb.prog)) + return -EINVAL; if (flags & BPF_F_TIMER_ABS) mode = HRTIMER_MODE_ABS_SOFT; @@ -1456,10 +1493,20 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, fla if (flags & BPF_F_TIMER_CPU_PIN) mode |= HRTIMER_MODE_PINNED; - hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); -out: - __bpf_spin_unlock_irqrestore(&timer->lock); - return ret; + /* + * bpf_async_cancel_and_free() could have dropped refcnt to zero. In + * such case BPF progs are not allowed to arm the timer to prevent UAF. + */ + if (!refcount_inc_not_zero(&t->cb.refcnt)) + return -ENOENT; + + if (!in_hardirq()) { + hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); + bpf_async_refcount_put(&t->cb); + return 0; + } else { + return bpf_async_schedule_op(&t->cb, BPF_ASYNC_START, nsecs, mode); + } } static const struct bpf_func_proto bpf_timer_start_proto = { @@ -1477,11 +1524,9 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async) bool inc = false; int ret = 0; - if (in_nmi()) + if (in_hardirq()) return -EOPNOTSUPP; - guard(rcu)(); - t = READ_ONCE(async->timer); if (!t) return -EINVAL; @@ -1536,78 +1581,85 @@ static const struct bpf_func_proto bpf_timer_cancel_proto = { .arg1_type = ARG_PTR_TO_TIMER, }; -static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) +static void bpf_async_process_op(struct bpf_async_cb *cb, u32 op, + u64 timer_nsec, u32 timer_mode) +{ + switch (cb->type) { + case BPF_ASYNC_TYPE_TIMER: { + struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb); + + switch (op) { + case BPF_ASYNC_START: + hrtimer_start(&t->timer, ns_to_ktime(timer_nsec), timer_mode); + break; + case BPF_ASYNC_CANCEL: + hrtimer_try_to_cancel(&t->timer); + break; + } + break; + } + case BPF_ASYNC_TYPE_WQ: { + struct bpf_work *w = container_of(cb, struct bpf_work, cb); + + switch (op) { + case BPF_ASYNC_START: + schedule_work(&w->work); + break; + case BPF_ASYNC_CANCEL: + cancel_work(&w->work); + break; + } + break; + } + } + bpf_async_refcount_put(cb); +} + +static void bpf_async_irq_worker(struct irq_work *work) +{ + struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker); + struct llist_node *pos, *n, *list; + + list = llist_del_all(&cb->async_cmds); + if (!list) + return; + + list = llist_reverse_order(list); + llist_for_each_safe(pos, n, list) { + struct bpf_async_cmd *cmd; + + cmd = container_of(pos, struct bpf_async_cmd, node); + bpf_async_process_op(cb, cmd->op, cmd->nsec, cmd->mode); + kfree_nolock(cmd); + } +} + 
+static void bpf_async_cancel_and_free(struct bpf_async_kern *async) { struct bpf_async_cb *cb; - /* Performance optimization: read async->cb without lock first. */ if (!READ_ONCE(async->cb)) - return NULL; + return; - __bpf_spin_lock_irqsave(&async->lock); - /* re-read it under lock */ - cb = async->cb; + cb = xchg(&async->cb, NULL); if (!cb) - goto out; - bpf_async_update_prog_callback(cb, NULL, NULL); - /* The subsequent bpf_timer_start/cancel() helpers won't be able to use - * this timer, since it won't be initialized. - */ - WRITE_ONCE(async->cb, NULL); -out: - __bpf_spin_unlock_irqrestore(&async->lock); - return cb; -} + return; -static void bpf_timer_delete(struct bpf_hrtimer *t) -{ /* - * We check that bpf_map_delete/update_elem() was called from timer - * callback_fn. In such case we don't call hrtimer_cancel() (since it - * will deadlock) and don't call hrtimer_try_to_cancel() (since it will - * just return -1). Though callback_fn is still running on this cpu it's - * safe to do kfree(t) because bpf_timer_cb() read everything it needed - * from 't'. The bpf subprog callback_fn won't be able to access 't', - * since async->cb = NULL was already done. The timer will be - * effectively cancelled because bpf_timer_cb() will return - * HRTIMER_NORESTART. - * - * However, it is possible the timer callback_fn calling us armed the - * timer _before_ calling us, such that failing to cancel it here will - * cause it to possibly use struct hrtimer after freeing bpf_hrtimer. - * Therefore, we _need_ to cancel any outstanding timers before we do - * call_rcu, even though no more timers can be armed. - * - * Moreover, we need to schedule work even if timer does not belong to - * the calling callback_fn, as on two different CPUs, we can end up in a - * situation where both sides run in parallel, try to cancel one - * another, and we end up waiting on both sides in hrtimer_cancel - * without making forward progress, since timer1 depends on time2 - * callback to finish, and vice versa. - * - * CPU 1 (timer1_cb) CPU 2 (timer2_cb) - * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1) - * - * To avoid these issues, punt to workqueue context when we are in a - * timer callback. + * No refcount_inc_not_zero(&cb->refcnt) here. Dropping the last + * refcnt. Either synchronously or asynchronously in irq_work. */ - if (this_cpu_read(hrtimer_running)) { - queue_work(system_dfl_wq, &t->cb.delete_work); - return; - } - if (IS_ENABLED(CONFIG_PREEMPT_RT)) { - /* If the timer is running on other CPU, also use a kworker to - * wait for the completion of the timer instead of trying to - * acquire a sleepable lock in hrtimer_cancel() to wait for its - * completion. - */ - if (hrtimer_try_to_cancel(&t->timer) >= 0) - call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free); - else - queue_work(system_dfl_wq, &t->cb.delete_work); + if (!in_hardirq()) { + bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0); } else { - bpf_timer_delete_work(&t->cb.delete_work); + (void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0); + /* + * bpf_async_schedule_op() either enqueues allocated cmd into llist + * or fails with ENOMEM and drop the last refcnt. + * This is unlikely, but safe, since bpf_async_cb_rcu_tasks_trace_free() + * callback will do additional timer/wq_cancel due to races anyway. 
+ */ } } @@ -1617,33 +1669,16 @@ static void bpf_timer_delete(struct bpf_hrtimer *t) */ void bpf_timer_cancel_and_free(void *val) { - struct bpf_hrtimer *t; - - t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val); - if (!t) - return; - - bpf_timer_delete(t); + bpf_async_cancel_and_free(val); } -/* This function is called by map_delete/update_elem for individual element and +/* + * This function is called by map_delete/update_elem for individual element and * by ops->map_release_uref when the user space reference to a map reaches zero. */ void bpf_wq_cancel_and_free(void *val) { - struct bpf_work *work; - - BTF_TYPE_EMIT(struct bpf_wq); - - work = (struct bpf_work *)__bpf_async_cancel_and_free(val); - if (!work) - return; - /* Trigger cancel of the sleepable work, but *do not* wait for - * it to finish if it was running as we might not be in a - * sleepable context. - * kfree will be called once the work has finished. - */ - schedule_work(&work->delete_work); + bpf_async_cancel_and_free(val); } BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr) @@ -3116,16 +3151,23 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) struct bpf_async_kern *async = (struct bpf_async_kern *)wq; struct bpf_work *w; - if (in_nmi()) - return -EOPNOTSUPP; if (flags) return -EINVAL; + w = READ_ONCE(async->work); if (!w || !READ_ONCE(w->cb.prog)) return -EINVAL; - schedule_work(&w->work); - return 0; + if (!refcount_inc_not_zero(&w->cb.refcnt)) + return -ENOENT; + + if (!in_hardirq()) { + schedule_work(&w->work); + bpf_async_refcount_put(&w->cb); + return 0; + } else { + return bpf_async_schedule_op(&w->cb, BPF_ASYNC_START, 0, 0); + } } __bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq, @@ -4384,6 +4426,53 @@ __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr) return 0; } +/** + * bpf_timer_cancel_async - try to deactivate a timer + * @timer: bpf_timer to stop + * + * Returns: + * + * * 0 when the timer was not active + * * 1 when the timer was active + * * -1 when the timer is currently executing the callback function and + * cannot be stopped + * * -ECANCELED when the timer will be cancelled asynchronously + * * -ENOMEM when out of memory + * * -EINVAL when the timer was not initialized + * * -ENOENT when this kfunc is racing with timer deletion + */ +__bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer) +{ + struct bpf_async_kern *async = (void *)timer; + struct bpf_async_cb *cb; + int ret; + + cb = READ_ONCE(async->cb); + if (!cb) + return -EINVAL; + + /* + * Unlike hrtimer_start() it's ok to synchronously call + * hrtimer_try_to_cancel() when refcnt reached zero, but deferring to + * irq_work is not, since irq callback may execute after RCU GP and + * cb could be freed at that time. Check for refcnt zero for + * consistency. + */ + if (!refcount_inc_not_zero(&cb->refcnt)) + return -ENOENT; + + if (!in_hardirq()) { + struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb); + + ret = hrtimer_try_to_cancel(&t->timer); + bpf_async_refcount_put(cb); + return ret; + } else { + ret = bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0); + return ret ? 
ret : -ECANCELED; + } +} + __bpf_kfunc_end_defs(); static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work) @@ -4567,6 +4656,7 @@ BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_dynptr_from_file) BTF_ID_FLAGS(func, bpf_dynptr_file_discard) +BTF_ID_FLAGS(func, bpf_timer_cancel_async) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6a616dc4dc54..40a8252140fb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -8675,13 +8675,25 @@ static int check_map_field_pointer(struct bpf_verifier_env *env, u32 regno, } static int process_timer_func(struct bpf_verifier_env *env, int regno, - struct bpf_call_arg_meta *meta) + struct bpf_map_desc *map) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) { verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n"); return -EOPNOTSUPP; } - return check_map_field_pointer(env, regno, BPF_TIMER, &meta->map); + return check_map_field_pointer(env, regno, BPF_TIMER, map); +} + +static int process_timer_helper(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) +{ + return process_timer_func(env, regno, &meta->map); +} + +static int process_timer_kfunc(struct bpf_verifier_env *env, int regno, + struct bpf_kfunc_call_arg_meta *meta) +{ + return process_timer_func(env, regno, &meta->map); } static int process_kptr_func(struct bpf_verifier_env *env, int regno, @@ -9973,7 +9985,7 @@ skip_type_check: } break; case ARG_PTR_TO_TIMER: - err = process_timer_func(env, regno, meta); + err = process_timer_helper(env, regno, meta); if (err) return err; break; @@ -12238,7 +12250,8 @@ enum { KF_ARG_WORKQUEUE_ID, KF_ARG_RES_SPIN_LOCK_ID, KF_ARG_TASK_WORK_ID, - KF_ARG_PROG_AUX_ID + KF_ARG_PROG_AUX_ID, + KF_ARG_TIMER_ID }; BTF_ID_LIST(kf_arg_btf_ids) @@ -12251,6 +12264,7 @@ BTF_ID(struct, bpf_wq) BTF_ID(struct, bpf_res_spin_lock) BTF_ID(struct, bpf_task_work) BTF_ID(struct, bpf_prog_aux) +BTF_ID(struct, bpf_timer) static bool __is_kfunc_ptr_arg_type(const struct btf *btf, const struct btf_param *arg, int type) @@ -12294,6 +12308,11 @@ static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_par return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID); } +static bool is_kfunc_arg_timer(const struct btf *btf, const struct btf_param *arg) +{ + return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_TIMER_ID); +} + static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg) { return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_WORKQUEUE_ID); @@ -12393,6 +12412,7 @@ enum kfunc_ptr_arg_type { KF_ARG_PTR_TO_NULL, KF_ARG_PTR_TO_CONST_STR, KF_ARG_PTR_TO_MAP, + KF_ARG_PTR_TO_TIMER, KF_ARG_PTR_TO_WORKQUEUE, KF_ARG_PTR_TO_IRQ_FLAG, KF_ARG_PTR_TO_RES_SPIN_LOCK, @@ -12646,6 +12666,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_wq(meta->btf, &args[argno])) return KF_ARG_PTR_TO_WORKQUEUE; + if (is_kfunc_arg_timer(meta->btf, &args[argno])) + return KF_ARG_PTR_TO_TIMER; + if (is_kfunc_arg_task_work(meta->btf, &args[argno])) return KF_ARG_PTR_TO_TASK_WORK; @@ -13439,6 +13462,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_REFCOUNTED_KPTR: case KF_ARG_PTR_TO_CONST_STR: case KF_ARG_PTR_TO_WORKQUEUE: + case KF_ARG_PTR_TO_TIMER: case KF_ARG_PTR_TO_TASK_WORK: case KF_ARG_PTR_TO_IRQ_FLAG: case KF_ARG_PTR_TO_RES_SPIN_LOCK: @@ -13738,6 +13762,15 @@ 
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ if (ret < 0) return ret; break; + case KF_ARG_PTR_TO_TIMER: + if (reg->type != PTR_TO_MAP_VALUE) { + verbose(env, "arg#%d doesn't point to a map value\n", i); + return -EINVAL; + } + ret = process_timer_kfunc(env, regno, meta); + if (ret < 0) + return ret; + break; case KF_ARG_PTR_TO_TASK_WORK: if (reg->type != PTR_TO_MAP_VALUE) { verbose(env, "arg#%d doesn't point to a map value\n", i); @@ -21429,20 +21462,6 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, } } - if (btf_record_has_field(map->record, BPF_TIMER)) { - if (is_tracing_prog_type(prog_type)) { - verbose(env, "tracing progs cannot use bpf_timer yet\n"); - return -EINVAL; - } - } - - if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { - if (is_tracing_prog_type(prog_type)) { - verbose(env, "tracing progs cannot use bpf_wq yet\n"); - return -EINVAL; - } - } - if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); |
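
For completeness, here is a hedged sketch of how a BPF program might call the new bpf_timer_cancel_async() kfunc added above. Its signature is taken from the diff; the map layout, the SEC("perf_event") attach point, the extern declaration style and the assumption that the timer is initialized and started elsewhere are all illustrative, not taken from the series' selftests.

```c
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

/* Kfunc introduced by this series; declaration style is assumed. */
extern int bpf_timer_cancel_async(struct bpf_timer *timer) __ksym;

/* perf_event programs can run in NMI context; with this series such
 * programs may use maps containing bpf_timer, and cancelling from
 * hard-IRQ/NMI is deferred to irq_work (the kfunc returns -ECANCELED).
 */
SEC("perf_event")
int cancel_from_nmi(void *ctx)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

	if (!e)
		return 0;
	/* Timer init/set_callback/start are assumed to happen elsewhere. */
	bpf_timer_cancel_async(&e->t);
	return 0;
}

char _license[] SEC("license") = "GPL";
```

bpf_timer_cancel() remains the synchronous variant and now rejects hard-IRQ callers with -EOPNOTSUPP, as the helpers.c portion of the diff shows.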
