| field | value | date |
|---|---|---|
| author | Andrii Nakryiko <andrii@kernel.org> | 2023-09-25 16:37:45 -0700 |
| committer | Andrii Nakryiko <andrii@kernel.org> | 2023-09-25 16:37:53 -0700 |
| commit | 0e73ef1d8c09a1f21ca2b19684819dbbcdce00f9 (patch) | |
| tree | c693b65caa5a8a700c04677fcce5e8db95a7a706 /include | |
| parent | e0fa6523e02ab31a27daf52e58a0d95fd52d5300 (diff) | |
| parent | 85981e0f9e9fc882578f0ad7488d6c59193dd187 (diff) | |
Merge branch 'bpf: Add missed stats for kprobes'
Jiri Olsa says:
====================
hi,
at the moment we can't retrieve the number of missed kprobe
executions, nor the subsequently missed executions of the attached BPF programs.
This patchset adds:
- counting of missed executions at the attach layer for:
. kprobes attached through perf link (kprobe/ftrace)
. kprobes attached through kprobe.multi link (fprobe)
- counting of recursion_misses for BPF kprobe programs
It's still technically possible to create a kprobe without a perf link (using
the SET_BPF perf ioctl), in which case we don't have a way to retrieve the
kprobe's 'missed' count. However, both the libbpf and cilium/ebpf libraries use
a perf link whenever it's available, and on old kernels without perf link
support a BPF program can be used to retrieve the kprobe missed count.
v3 changes:
- added acks [Song]
- make test_missed not serial [Andrii]
Also available at:
https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
bpf/missed_stats
thanks,
jirka
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
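
The UAPI hunks in the diff below add a `missed` field to both the `kprobe_multi` and the perf-event `kprobe` variants of `struct bpf_link_info`. As a minimal userspace sketch (an illustration, not part of this series: it assumes libbpf's `bpf_obj_get_info_by_fd()` and an already attached kprobe link fd, here called `link_fd`), the counter could be read roughly like this:

```c
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* Sketch: read the new 'missed' counters from an existing kprobe link fd.
 * Assumes 'link_fd' refers to a kprobe attached through a perf link or a
 * kprobe.multi link; error handling is kept minimal for brevity. */
static int print_kprobe_missed(int link_fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(link_fd, &info, &len);
	if (err)
		return err;

	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI)
		printf("kprobe.multi missed: %llu\n",
		       (unsigned long long)info.kprobe_multi.missed);
	else if (info.type == BPF_LINK_TYPE_PERF_EVENT &&
		 info.perf_event.type == BPF_PERF_EVENT_KPROBE)
		printf("kprobe missed: %llu\n",
		       (unsigned long long)info.perf_event.kprobe.missed);

	return 0;
}
```

Newer libbpf also has a typed `bpf_link_get_info_by_fd()` wrapper that could be used instead; either way the kernel fills `missed` only when it runs with this series applied.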
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/bpf.h | 16 |
| -rw-r--r-- | include/linux/trace_events.h | 6 |
| -rw-r--r-- | include/uapi/linux/bpf.h | 2 |

3 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 30063a760b5a..a82efd34b741 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2922,6 +2922,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
 #endif /* CONFIG_BPF_SYSCALL */
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+	const struct bpf_prog_array_item *item;
+	struct bpf_prog *prog;
+
+	if (unlikely(!array))
+		return;
+
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		bpf_prog_inc_misses_counter(prog);
+		item++;
+	}
+}
+
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 void bpf_sk_reuseport_detach(struct sock *sk);
 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 21ae37e49319..5eb88a66eb68 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -761,7 +761,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
 			    u32 *fd_type, const char **buf,
-			    u64 *probe_offset, u64 *probe_addr);
+			    u64 *probe_offset, u64 *probe_addr,
+			    unsigned long *missed);
 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 #else
@@ -801,7 +802,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
 static inline int bpf_get_perf_event_info(const struct perf_event *event,
 					  u32 *prog_id, u32 *fd_type,
 					  const char **buf, u64 *probe_offset,
-					  u64 *probe_addr)
+					  u64 *probe_addr, unsigned long *missed)
 {
 	return -EOPNOTSUPP;
 }
@@ -877,6 +878,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
 extern int bpf_get_kprobe_info(const struct perf_event *event,
 			       u32 *fd_type, const char **symbol,
 			       u64 *probe_offset, u64 *probe_addr,
+			       unsigned long *missed,
 			       bool perf_type_tracepoint);
 #endif
 #ifdef CONFIG_UPROBE_EVENTS
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5f13db15a3c7..70bfa997e896 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6532,6 +6532,7 @@ struct bpf_link_info {
 			__aligned_u64 addrs;
 			__u32 count; /* in/out: kprobe_multi function count */
 			__u32 flags;
+			__u64 missed;
 		} kprobe_multi;
 		struct {
 			__u32 type; /* enum bpf_perf_event_type */
@@ -6547,6 +6548,7 @@ struct bpf_link_info {
 				__u32 name_len;
 				__u32 offset; /* offset from func_name */
 				__u64 addr;
+				__u64 missed;
 			} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
 			struct {
 				__aligned_u64 tp_name; /* in/out */
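
For context on the include/linux/bpf.h hunk above: `bpf_prog_inc_misses_counters()` walks a whole `bpf_prog_array` and bumps each program's miss counter, so an attach-layer path that has to skip running the attached programs can still account the miss per program. The fragment below is only an illustrative sketch modeled on the per-CPU recursion guard in `trace_call_bpf()`; the function name `run_or_count_miss` and its exact shape are hypothetical and not taken from this series:

```c
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/trace_events.h>

/* Hypothetical caller sketch (not part of the hunks above): when the
 * per-CPU recursion guard fires, charge a miss to every program attached
 * to the event instead of running the array. */
static unsigned int run_or_count_miss(struct trace_event_call *call, void *ctx)
{
	unsigned int ret = 0;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/* Another BPF program is already running on this CPU;
		 * don't recurse, but make the skip visible in each
		 * program's recursion_misses counter. */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		goto out;
	}

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	return ret;
}
```

The point of the helper taking the whole array (rather than a single `struct bpf_prog *`) is that a skipped probe hit misses every program attached at that point, so each one should see its counter incremented.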
