From dd8657894c11b03c6eb0fd53fe9d7fec2072d18b Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 20 Sep 2023 23:31:40 +0200
Subject: bpf: Count missed stats in trace_call_bpf

Increase the misses stats in case bpf_prog_array execution is skipped
because of the recursion check in trace_call_bpf.

Add bpf_prog_inc_misses_counters, which increases the misses count for
every bpf program in a bpf_prog_array.

Signed-off-by: Jiri Olsa
Signed-off-by: Andrii Nakryiko
Tested-by: Song Liu
Reviewed-by: Song Liu
Link: https://lore.kernel.org/bpf/20230920213145.1941596-5-jolsa@kernel.org
---
 include/linux/bpf.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'include/linux/bpf.h')

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 30063a760b5a..a82efd34b741 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2922,6 +2922,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
 #endif /* CONFIG_BPF_SYSCALL */
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+	const struct bpf_prog_array_item *item;
+	struct bpf_prog *prog;
+
+	if (unlikely(!array))
+		return;
+
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		bpf_prog_inc_misses_counter(prog);
+		item++;
+	}
+}
+
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 void bpf_sk_reuseport_detach(struct sock *sk);
 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
-- 
cgit v1.2.3
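The recursion check this patch accounts for lives in trace_call_bpf(). A
minimal sketch of how the new helper ends up being used there, based on the
companion change to kernel/trace/bpf_trace.c in the same series (abridged,
and not part of the diff above):

/*
 * Sketch of the caller in kernel/trace/bpf_trace.c (abridged; from the
 * companion patch in this series, shown for illustration only).
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * A bpf program is already running on this cpu, so the
		 * whole array execution is skipped; count a miss for
		 * every program in the array instead of running it.
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

bpf_prog_inc_misses_counter() bumps the per-CPU stats that back the
recursion_misses counter exposed through bpf_prog_info and the program's
fdinfo.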
From bc5bc309db45a7ab218ce8259ba9bc7659be61ca Mon Sep 17 00:00:00 2001
From: Yafang Shao
Date: Thu, 5 Oct 2023 08:41:23 +0000
Subject: bpf: Inherit system settings for CPU security mitigations

There is a system-wide setting for CPU security mitigations,
'mitigations='. When set to 'mitigations=off', it disables all optional
CPU mitigations. Hence, when the system is booted with
'mitigations=off', the BPF subsystem should also bypass its Spectre v1
and Spectre v4 mitigations.

Note that there is also a more specific 'nospectre_v1' setting on the
x86 and ppc architectures, though it is not currently exported, so this
more fine-grained option is disregarded for now.

This idea emerged during a discussion with Luis about potential
Spectre v1 attacks [0].

[0] https://lore.kernel.org/bpf/b4fc15f7-b204-767e-ebb9-fdb4233961fb@iogearbox.net

Signed-off-by: Yafang Shao
Signed-off-by: Daniel Borkmann
Acked-by: Stanislav Fomichev
Acked-by: Song Liu
Acked-by: KP Singh
Cc: Luis Gerhorst
Link: https://lore.kernel.org/bpf/20231005084123.1338-1-laoar.shao@gmail.com
---
 include/linux/bpf.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/bpf.h')

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a82efd34b741..61bde4520f5c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2164,12 +2164,12 @@ static inline bool bpf_allow_uninit_stack(void)
 
 static inline bool bpf_bypass_spec_v1(void)
 {
-	return perfmon_capable();
+	return perfmon_capable() || cpu_mitigations_off();
 }
 
 static inline bool bpf_bypass_spec_v4(void)
 {
-	return perfmon_capable();
+	return perfmon_capable() || cpu_mitigations_off();
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
-- 
cgit v1.2.3

From 236334aeec0f93217cf9235f2004e61a0a1a5985 Mon Sep 17 00:00:00 2001
From: Yafang Shao
Date: Fri, 13 Oct 2023 08:39:16 +0000
Subject: bpf: Avoid unnecessary audit log for CPU security mitigations

Check cpu_mitigations_off() first to avoid calling capable() when
mitigations are off. Since capable() can emit an audit record for the
capability check, this avoids unnecessary audit log entries.

Fixes: bc5bc309db45 ("bpf: Inherit system settings for CPU security mitigations")
Suggested-by: Andrii Nakryiko
Signed-off-by: Yafang Shao
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/CAEf4Bza6UVUWqcWQ-66weZ-nMDr+TFU3Mtq=dumZFD-pSqU7Ow@mail.gmail.com/
Link: https://lore.kernel.org/bpf/20231013083916.4199-1-laoar.shao@gmail.com
---
 include/linux/bpf.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/bpf.h')

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 61bde4520f5c..f0891ba24cb1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2164,12 +2164,12 @@ static inline bool bpf_allow_uninit_stack(void)
 
 static inline bool bpf_bypass_spec_v1(void)
 {
-	return perfmon_capable() || cpu_mitigations_off();
+	return cpu_mitigations_off() || perfmon_capable();
 }
 
 static inline bool bpf_bypass_spec_v4(void)
 {
-	return perfmon_capable() || cpu_mitigations_off();
+	return cpu_mitigations_off() || perfmon_capable();
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
-- 
cgit v1.2.3
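The ordering matters because '||' short-circuits left to right: with
cpu_mitigations_off() first, a system booted with 'mitigations=off' returns
true before perfmon_capable() is ever called, so no LSM capability check
runs and no audit record is emitted. The verifier samples these helpers
once per program load; a rough sketch of the consuming code in bpf_check()
(kernel/bpf/verifier.c around this kernel version, shown for illustration
only and not part of the diff above):

	/*
	 * Sketch of the consumer in bpf_check(): the results are cached
	 * in the verifier environment for the whole verification run.
	 */
	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
	env->allow_uninit_stack = bpf_allow_uninit_stack();
	env->bypass_spec_v1 = bpf_bypass_spec_v1();
	env->bypass_spec_v4 = bpf_bypass_spec_v4();

With bypass_spec_v1 and bypass_spec_v4 set, the verifier skips the Spectre
v1 pointer-arithmetic sanitization and the v4 speculative-store-bypass
sanitization for that program, matching the system-wide mitigation policy.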