| author | Peter Zijlstra <peterz@infradead.org> | 2024-09-03 10:46:00 -0700 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2024-09-05 16:56:14 +0200 |
| commit | 04b01625da130c7521b768996cd5e48052198b97 (patch) | |
| tree | d9f6d4adae5de1747d91f31fc70a9558c19e6127 /kernel/trace | |
| parent | cc01bd044e6a521d2cd128f685ee8d23ef0067f2 (diff) | |
perf/uprobe: split uprobe_unregister()
With uprobe_unregister() having grown a synchronize_srcu(), it becomes
fairly slow to call, especially since both users of this API call it in
a loop.
Peel off the sync_srcu() and do it once, after the loop.
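For callers, the resulting pattern looks roughly like the sketch below (the structure and function names here are illustrative; the real call sites are in the kernel/trace diff further down): unregister every consumer with the _nosync variant, then pay for the SRCU grace period once after the loop.

```c
/*
 * Illustrative sketch of the new calling convention. "my_attachment" and
 * "detach_all" are made-up names; the real call sites are shown in the
 * diff below. uprobe_unregister_nosync()/uprobe_unregister_sync() are the
 * two halves of the old uprobe_unregister().
 */
struct my_attachment {
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static void detach_all(struct my_attachment *att, u32 cnt)
{
	u32 i;

	/* Fast per-consumer detach: no synchronize_srcu() per iteration. */
	for (i = 0; i < cnt; i++)
		uprobe_unregister_nosync(att[i].uprobe, &att[i].consumer);

	/* One grace period for the whole batch, only if anything was detached. */
	if (cnt)
		uprobe_unregister_sync();
}
```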
We also need to add uprobe_unregister_sync() into uprobe_register()'s
error handling path, as we need to be careful about returning to the
caller before we have a guarantee that a partially attached consumer
won't be called anymore. This is an unlikely slow path, and it is
totally fine for it to be slow in the case of a failed attach.
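The uprobe_register() side is not part of the kernel/trace diffstat below, but the shape of that error path would be roughly as follows. This is a sketch only: register_for_each_vma() and the exact surrounding code in kernel/events/uprobes.c are assumed here, not shown on this page.

```c
	/*
	 * Assumed shape of uprobe_register()'s error handling after the
	 * split (the kernel/events/uprobes.c hunk is not in the diffstat on
	 * this page). If attaching failed part-way, the consumer may already
	 * be reachable from the handlers, so detach and wait for the SRCU
	 * grace period before returning the error.
	 */
	ret = register_for_each_vma(uprobe, uc);
	if (ret) {
		uprobe_unregister_nosync(uprobe, uc);
		uprobe_unregister_sync();	/* slow, but only on a failed attach */
		return ERR_PTR(ret);
	}
```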
Co-developed-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20240903174603.3554182-6-andrii@kernel.org
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/bpf_trace.c | 5 |
| -rw-r--r-- | kernel/trace/trace_uprobe.c | 6 |
2 files changed, 9 insertions, 2 deletions
```diff
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c99bf0617602..ac0a01cc8634 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3184,7 +3184,10 @@ static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
 	u32 i;
 
 	for (i = 0; i < cnt; i++)
-		uprobe_unregister(uprobes[i].uprobe, &uprobes[i].consumer);
+		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
+
+	if (cnt)
+		uprobe_unregister_sync();
 }
 
 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7eb79e0a5352..f7443e996b1b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1097,6 +1097,7 @@ static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
 static void __probe_event_disable(struct trace_probe *tp)
 {
 	struct trace_uprobe *tu;
+	bool sync = false;
 
 	tu = container_of(tp, struct trace_uprobe, tp);
 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
@@ -1105,9 +1106,12 @@ static void __probe_event_disable(struct trace_probe *tp)
 		if (!tu->uprobe)
 			continue;
 
-		uprobe_unregister(tu->uprobe, &tu->consumer);
+		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
+		sync = true;
 		tu->uprobe = NULL;
 	}
+	if (sync)
+		uprobe_unregister_sync();
 }
 
 static int probe_event_enable(struct trace_event_call *call,
```
