From 31123c0360e01ee0389aee3a7b2ad32f13136662 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 16 Aug 2022 23:18:47 -0700 Subject: selftests/bpf: bpf_setsockopt tests This patch adds tests to exercise optnames that are allowed in bpf_setsockopt(). Reviewed-by: Stanislav Fomichev Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20220817061847.4182339-1-kafai@fb.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/setget_sockopt.c | 125 ++++++ .../testing/selftests/bpf/progs/bpf_tracing_net.h | 31 +- tools/testing/selftests/bpf/progs/setget_sockopt.c | 451 +++++++++++++++++++++ 3 files changed, 606 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/setget_sockopt.c create mode 100644 tools/testing/selftests/bpf/progs/setget_sockopt.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c new file mode 100644 index 000000000000..018611e6b248 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include +#include +#include + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" + +#include "setget_sockopt.skel.h" + +#define CG_NAME "/setget-sockopt-test" + +static const char addr4_str[] = "127.0.0.1"; +static const char addr6_str[] = "::1"; +static struct setget_sockopt *skel; +static int cg_fd; + +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "set lo up")) + return -1; + + if (!ASSERT_OK(system("ip link add dev binddevtest1 type veth peer name binddevtest2"), + "add veth")) + return -1; + + if (!ASSERT_OK(system("ip link set dev binddevtest1 up"), + "bring veth up")) + return -1; + + return 0; +} + +static void test_tcp(int family) +{ + struct setget_sockopt__bss *bss = skel->bss; + int sfd, cfd; + + memset(bss, 0, sizeof(*bss)); + + sfd = start_server(family, SOCK_STREAM, + family == AF_INET6 ? addr6_str : addr4_str, 0, 0); + if (!ASSERT_GE(sfd, 0, "start_server")) + return; + + cfd = connect_to_fd(sfd, 0); + if (!ASSERT_GE(cfd, 0, "connect_to_fd_server")) { + close(sfd); + return; + } + close(sfd); + close(cfd); + + ASSERT_EQ(bss->nr_listen, 1, "nr_listen"); + ASSERT_EQ(bss->nr_connect, 1, "nr_connect"); + ASSERT_EQ(bss->nr_active, 1, "nr_active"); + ASSERT_EQ(bss->nr_passive, 1, "nr_passive"); + ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create"); + ASSERT_EQ(bss->nr_binddev, 2, "nr_bind"); +} + +static void test_udp(int family) +{ + struct setget_sockopt__bss *bss = skel->bss; + int sfd; + + memset(bss, 0, sizeof(*bss)); + + sfd = start_server(family, SOCK_DGRAM, + family == AF_INET6 ? 
addr6_str : addr4_str, 0, 0); + if (!ASSERT_GE(sfd, 0, "start_server")) + return; + close(sfd); + + ASSERT_GE(bss->nr_socket_post_create, 1, "nr_socket_post_create"); + ASSERT_EQ(bss->nr_binddev, 1, "nr_bind"); +} + +void test_setget_sockopt(void) +{ + cg_fd = test__join_cgroup(CG_NAME); + if (cg_fd < 0) + return; + + if (create_netns()) + goto done; + + skel = setget_sockopt__open(); + if (!ASSERT_OK_PTR(skel, "open skel")) + goto done; + + strcpy(skel->rodata->veth, "binddevtest1"); + skel->rodata->veth_ifindex = if_nametoindex("binddevtest1"); + if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex")) + goto done; + + if (!ASSERT_OK(setget_sockopt__load(skel), "load skel")) + goto done; + + skel->links.skops_sockopt = + bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd); + if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup")) + goto done; + + skel->links.socket_post_create = + bpf_program__attach_cgroup(skel->progs.socket_post_create, cg_fd); + if (!ASSERT_OK_PTR(skel->links.socket_post_create, "attach_cgroup")) + goto done; + + test_tcp(AF_INET6); + test_tcp(AF_INET); + test_udp(AF_INET6); + test_udp(AF_INET); + +done: + setget_sockopt__destroy(skel); + close(cg_fd); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index 98dd2c4815f0..5ebc6dabef84 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -6,13 +6,40 @@ #define AF_INET6 10 #define SOL_SOCKET 1 +#define SO_REUSEADDR 2 #define SO_SNDBUF 7 -#define __SO_ACCEPTCON (1 << 16) +#define SO_RCVBUF 8 +#define SO_KEEPALIVE 9 #define SO_PRIORITY 12 +#define SO_REUSEPORT 15 +#define SO_RCVLOWAT 18 +#define SO_BINDTODEVICE 25 +#define SO_MARK 36 +#define SO_MAX_PACING_RATE 47 +#define SO_BINDTOIFINDEX 62 +#define SO_TXREHASH 74 +#define __SO_ACCEPTCON (1 << 16) + +#define IP_TOS 1 + +#define IPV6_TCLASS 67 +#define IPV6_AUTOFLOWLABEL 70 #define SOL_TCP 6 +#define TCP_NODELAY 1 +#define TCP_MAXSEG 2 +#define TCP_KEEPIDLE 4 +#define TCP_KEEPINTVL 5 +#define TCP_KEEPCNT 6 +#define TCP_SYNCNT 7 +#define TCP_WINDOW_CLAMP 10 #define TCP_CONGESTION 13 +#define TCP_THIN_LINEAR_TIMEOUTS 16 +#define TCP_USER_TIMEOUT 18 +#define TCP_NOTSENT_LOWAT 25 +#define TCP_SAVE_SYN 27 #define TCP_CA_NAME_MAX 16 +#define TCP_NAGLE_OFF 1 #define ICSK_TIME_RETRANS 1 #define ICSK_TIME_PROBE0 3 @@ -49,6 +76,8 @@ #define sk_state __sk_common.skc_state #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr +#define sk_flags __sk_common.skc_flags +#define sk_reuse __sk_common.skc_reuse #define s6_addr32 in6_u.u6_addr32 diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c new file mode 100644 index 000000000000..4a4cb44a4a15 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include "bpf_tracing_net.h" +#include +#include +#include + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +extern unsigned long CONFIG_HZ __kconfig; + +const volatile char veth[IFNAMSIZ]; +const volatile int veth_ifindex; + +int nr_listen; +int nr_passive; +int nr_active; +int nr_connect; +int nr_binddev; +int nr_socket_post_create; + +struct sockopt_test { + int opt; + int new; + int restore; + int expected; + int tcp_expected; + unsigned int flip:1; +}; + +static const char cubic_cc[] = "cubic"; +static const char reno_cc[] = "reno"; + +static const struct sockopt_test sol_socket_tests[] = { + { .opt = SO_REUSEADDR, .flip = 1, }, + { .opt = SO_SNDBUF, .new = 8123, .expected = 8123 * 2, }, + { .opt = SO_RCVBUF, .new = 8123, .expected = 8123 * 2, }, + { .opt = SO_KEEPALIVE, .flip = 1, }, + { .opt = SO_PRIORITY, .new = 0xeb9f, .expected = 0xeb9f, }, + { .opt = SO_REUSEPORT, .flip = 1, }, + { .opt = SO_RCVLOWAT, .new = 8123, .expected = 8123, }, + { .opt = SO_MARK, .new = 0xeb9f, .expected = 0xeb9f, }, + { .opt = SO_MAX_PACING_RATE, .new = 0xeb9f, .expected = 0xeb9f, }, + { .opt = SO_TXREHASH, .flip = 1, }, + { .opt = 0, }, +}; + +static const struct sockopt_test sol_tcp_tests[] = { + { .opt = TCP_NODELAY, .flip = 1, }, + { .opt = TCP_MAXSEG, .new = 1314, .expected = 1314, }, + { .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, }, + { .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, }, + { .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, }, + { .opt = TCP_SYNCNT, .new = 123, .expected = 123, .restore = 124, }, + { .opt = TCP_WINDOW_CLAMP, .new = 8123, .expected = 8123, .restore = 8124, }, + { .opt = TCP_CONGESTION, }, + { .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, }, + { .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, }, + { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, }, + { .opt = TCP_SAVE_SYN, .new = 1, .expected = 1, }, + { .opt = 0, }, +}; + +static const struct sockopt_test sol_ip_tests[] = { + { .opt = IP_TOS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, }, + { .opt = 0, }, +}; + +static const struct sockopt_test sol_ipv6_tests[] = { + { .opt = IPV6_TCLASS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, }, + { .opt = IPV6_AUTOFLOWLABEL, .flip = 1, }, + { .opt = 0, }, +}; + +struct loop_ctx { + void *ctx; + struct sock *sk; +}; + +static int __bpf_getsockopt(void *ctx, struct sock *sk, + int level, int opt, int *optval, + int optlen) +{ + if (level == SOL_SOCKET) { + switch (opt) { + case SO_REUSEADDR: + *optval = !!BPF_CORE_READ_BITFIELD(sk, sk_reuse); + break; + case SO_KEEPALIVE: + *optval = !!(sk->sk_flags & (1UL << 3)); + break; + case SO_RCVLOWAT: + *optval = sk->sk_rcvlowat; + break; + case SO_MAX_PACING_RATE: + *optval = sk->sk_max_pacing_rate; + break; + default: + return bpf_getsockopt(ctx, level, opt, optval, optlen); + } + return 0; + } + + if (level == IPPROTO_TCP) { + struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk); + + if (!tp) + return -1; + + switch (opt) { + case TCP_NODELAY: + *optval = !!(BPF_CORE_READ_BITFIELD(tp, nonagle) & TCP_NAGLE_OFF); + break; + case TCP_MAXSEG: + *optval = tp->rx_opt.user_mss; + break; + case TCP_KEEPIDLE: + *optval = tp->keepalive_time / CONFIG_HZ; + break; + case TCP_SYNCNT: + *optval = tp->inet_conn.icsk_syn_retries; + break; + case TCP_KEEPINTVL: + *optval = tp->keepalive_intvl / CONFIG_HZ; + break; + case TCP_KEEPCNT: + *optval = tp->keepalive_probes; + break; + case TCP_WINDOW_CLAMP: + 
*optval = tp->window_clamp; + break; + case TCP_THIN_LINEAR_TIMEOUTS: + *optval = !!BPF_CORE_READ_BITFIELD(tp, thin_lto); + break; + case TCP_USER_TIMEOUT: + *optval = tp->inet_conn.icsk_user_timeout; + break; + case TCP_NOTSENT_LOWAT: + *optval = tp->notsent_lowat; + break; + case TCP_SAVE_SYN: + *optval = BPF_CORE_READ_BITFIELD(tp, save_syn); + break; + default: + return bpf_getsockopt(ctx, level, opt, optval, optlen); + } + return 0; + } + + if (level == IPPROTO_IPV6) { + switch (opt) { + case IPV6_AUTOFLOWLABEL: { + __u16 proto = sk->sk_protocol; + struct inet_sock *inet_sk; + + if (proto == IPPROTO_TCP) + inet_sk = (struct inet_sock *)bpf_skc_to_tcp_sock(sk); + else + inet_sk = (struct inet_sock *)bpf_skc_to_udp6_sock(sk); + + if (!inet_sk) + return -1; + + *optval = !!inet_sk->pinet6->autoflowlabel; + break; + } + default: + return bpf_getsockopt(ctx, level, opt, optval, optlen); + } + return 0; + } + + return bpf_getsockopt(ctx, level, opt, optval, optlen); +} + +static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, + const struct sockopt_test *t, + int level) +{ + int old, tmp, new, opt = t->opt; + + opt = t->opt; + + if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old))) + return 1; + /* kernel initialized txrehash to 255 */ + if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1) + old = 1; + + new = !old; + if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) + return 1; + if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || + tmp != new) + return 1; + + if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old))) + return 1; + + return 0; +} + +static int bpf_test_sockopt_int(void *ctx, struct sock *sk, + const struct sockopt_test *t, + int level) +{ + int old, tmp, new, expected, opt; + + opt = t->opt; + new = t->new; + if (sk->sk_type == SOCK_STREAM && t->tcp_expected) + expected = t->tcp_expected; + else + expected = t->expected; + + if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old)) || + old == new) + return 1; + + if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) + return 1; + if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || + tmp != expected) + return 1; + + if (t->restore) + old = t->restore; + if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old))) + return 1; + + return 0; +} + +static int bpf_test_socket_sockopt(__u32 i, struct loop_ctx *lc) +{ + const struct sockopt_test *t; + + if (i >= ARRAY_SIZE(sol_socket_tests)) + return 1; + + t = &sol_socket_tests[i]; + if (!t->opt) + return 1; + + if (t->flip) + return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, SOL_SOCKET); + + return bpf_test_sockopt_int(lc->ctx, lc->sk, t, SOL_SOCKET); +} + +static int bpf_test_ip_sockopt(__u32 i, struct loop_ctx *lc) +{ + const struct sockopt_test *t; + + if (i >= ARRAY_SIZE(sol_ip_tests)) + return 1; + + t = &sol_ip_tests[i]; + if (!t->opt) + return 1; + + if (t->flip) + return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IP); + + return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IP); +} + +static int bpf_test_ipv6_sockopt(__u32 i, struct loop_ctx *lc) +{ + const struct sockopt_test *t; + + if (i >= ARRAY_SIZE(sol_ipv6_tests)) + return 1; + + t = &sol_ipv6_tests[i]; + if (!t->opt) + return 1; + + if (t->flip) + return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IPV6); + + return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IPV6); +} + +static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc) +{ + const struct sockopt_test *t; + struct sock *sk; + void *ctx; + + if (i >= 
ARRAY_SIZE(sol_tcp_tests)) + return 1; + + t = &sol_tcp_tests[i]; + if (!t->opt) + return 1; + + ctx = lc->ctx; + sk = lc->sk; + + if (t->opt == TCP_CONGESTION) { + char old_cc[16], tmp_cc[16]; + const char *new_cc; + + if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc))) + return 1; + if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) + new_cc = reno_cc; + else + new_cc = cubic_cc; + if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, (void *)new_cc, + sizeof(new_cc))) + return 1; + if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, tmp_cc, sizeof(tmp_cc))) + return 1; + if (bpf_strncmp(tmp_cc, sizeof(tmp_cc), new_cc)) + return 1; + if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc))) + return 1; + return 0; + } + + if (t->flip) + return bpf_test_sockopt_flip(ctx, sk, t, IPPROTO_TCP); + + return bpf_test_sockopt_int(ctx, sk, t, IPPROTO_TCP); +} + +static int bpf_test_sockopt(void *ctx, struct sock *sk) +{ + struct loop_ctx lc = { .ctx = ctx, .sk = sk, }; + __u16 family, proto; + int n; + + family = sk->sk_family; + proto = sk->sk_protocol; + + n = bpf_loop(ARRAY_SIZE(sol_socket_tests), bpf_test_socket_sockopt, &lc, 0); + if (n != ARRAY_SIZE(sol_socket_tests)) + return -1; + + if (proto == IPPROTO_TCP) { + n = bpf_loop(ARRAY_SIZE(sol_tcp_tests), bpf_test_tcp_sockopt, &lc, 0); + if (n != ARRAY_SIZE(sol_tcp_tests)) + return -1; + } + + if (family == AF_INET) { + n = bpf_loop(ARRAY_SIZE(sol_ip_tests), bpf_test_ip_sockopt, &lc, 0); + if (n != ARRAY_SIZE(sol_ip_tests)) + return -1; + } else { + n = bpf_loop(ARRAY_SIZE(sol_ipv6_tests), bpf_test_ipv6_sockopt, &lc, 0); + if (n != ARRAY_SIZE(sol_ipv6_tests)) + return -1; + } + + return 0; +} + +static int binddev_test(void *ctx) +{ + const char empty_ifname[] = ""; + int ifindex, zero = 0; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE, + (void *)veth, sizeof(veth))) + return -1; + if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + &ifindex, sizeof(int)) || + ifindex != veth_ifindex) + return -1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE, + (void *)empty_ifname, sizeof(empty_ifname))) + return -1; + if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + &ifindex, sizeof(int)) || + ifindex != 0) + return -1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + (void *)&veth_ifindex, sizeof(int))) + return -1; + if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + &ifindex, sizeof(int)) || + ifindex != veth_ifindex) + return -1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + &zero, sizeof(int))) + return -1; + if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX, + &ifindex, sizeof(int)) || + ifindex != 0) + return -1; + + return 0; +} + +SEC("lsm_cgroup/socket_post_create") +int BPF_PROG(socket_post_create, struct socket *sock, int family, + int type, int protocol, int kern) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 1; + + nr_socket_post_create += !bpf_test_sockopt(sk, sk); + nr_binddev += !binddev_test(sk); + + return 1; +} + +SEC("sockops") +int skops_sockopt(struct bpf_sock_ops *skops) +{ + struct bpf_sock *bpf_sk = skops->sk; + struct sock *sk; + + if (!bpf_sk) + return 1; + + sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk); + if (!sk) + return 1; + + switch (skops->op) { + case BPF_SOCK_OPS_TCP_LISTEN_CB: + nr_listen += !bpf_test_sockopt(skops, sk); + break; + case BPF_SOCK_OPS_TCP_CONNECT_CB: + nr_connect += !bpf_test_sockopt(skops, sk); + break; + case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: + nr_active += 
!bpf_test_sockopt(skops, sk); + break; + case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: + nr_passive += !bpf_test_sockopt(skops, sk); + break; + } + + return 1; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From e918cd231ee6f1dc969e71718ed11c71e98f5c4c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 17 Aug 2022 22:32:42 +0100 Subject: selftests/bpf: Fix spelling mistake. There is a spelling mistake in an ASSERT_OK literal string. Fix it. Signed-off-by: Colin Ian King Acked-by: Mykola Lysenko Link: https://lore.kernel.org/r/20220817213242.101277-1-colin.i.king@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/kfunc_call.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 351fafa006fb..eede7c304f86 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -109,7 +109,7 @@ static void test_destructive(void) { __u64 save_caps = 0; - ASSERT_OK(test_destructive_open_and_load(), "succesful_load"); + ASSERT_OK(test_destructive_open_and_load(), "successful_load"); if (!ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "drop_caps")) return; -- cgit v1.2.3 From b979f005d9b1ebdba565e85f5228dda6fe7a30e4 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 19 Aug 2022 12:21:55 -0700 Subject: selftest/bpf: Add setget_sockopt to DENYLIST.s390x Trampoline is not supported in s390. Fixes: 31123c0360e0 ("selftests/bpf: bpf_setsockopt tests") Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20220819192155.91713-1-kafai@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 9d8de15e725e..a708c3dcc154 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -65,3 +65,4 @@ send_signal # intermittently fails to receive signa select_reuseport # intermittently fails on new s390x setup xdp_synproxy # JIT does not support calling kernel function (kfunc) unpriv_bpf_disabled # fentry +setget_sockopt # attach unexpected error: -524 (trampoline) -- cgit v1.2.3 From 91350fe152930c0d61a362af68272526490efea5 Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Sun, 21 Aug 2022 14:35:17 +0300 Subject: bpf, flow_dissector: Introduce BPF_FLOW_DISSECTOR_CONTINUE retcode for bpf progs Currently, attaching BPF_PROG_TYPE_FLOW_DISSECTOR programs completely replaces the flow-dissector logic with custom dissection logic. This forces implementors to write programs that handle dissection for any flows expected in the namespace. It makes sense for flow-dissector BPF programs to just augment the dissector with custom logic (e.g. dissecting certain flows or custom protocols), while enjoying the broad capabilities of the standard dissector for any other traffic. Introduce BPF_FLOW_DISSECTOR_CONTINUE retcode. Flow-dissector BPF programs may return this to indicate no dissection was made, and fallback to the standard dissector is requested. 
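As an illustrative sketch (not part of this patch), a dissector program that
only dissects IPv4 itself and defers all other traffic to the standard
dissector could look as follows; parse_ipv4() is a hypothetical helper, and
the usual flow-dissector includes (<linux/bpf.h>, <bpf/bpf_helpers.h>,
<bpf/bpf_endian.h>, <linux/if_ether.h>) are assumed:

SEC("flow_dissector")
int augment_dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	/* Custom dissection only for IPv4; parse_ipv4() is assumed to
	 * fill skb->flow_keys and return BPF_OK or BPF_DROP.
	 */
	if (keys->n_proto == bpf_htons(ETH_P_IP))
		return parse_ipv4(skb);

	/* No dissection was made: fall back to the standard dissector. */
	return BPF_FLOW_DISSECTOR_CONTINUE;
}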
Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Reviewed-by: Stanislav Fomichev Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220821113519.116765-3-shmulik.ladkani@gmail.com --- tools/include/uapi/linux/bpf.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1d6085e15fc8..f38814fbb618 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5861,6 +5861,11 @@ enum bpf_ret_code { * represented by BPF_REDIRECT above). */ BPF_LWT_REROUTE = 128, + /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR + * to indicate that no custom dissection was performed, and + * fallback to standard dissector is requested. + */ + BPF_FLOW_DISSECTOR_CONTINUE = 129, }; struct bpf_sock { -- cgit v1.2.3 From 5deedfbee84278da3b76fb7176dc3742f56eb370 Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Sun, 21 Aug 2022 14:35:18 +0300 Subject: bpf, test_run: Propagate bpf_flow_dissect's retval to user's bpf_attr.test.retval Formerly, a boolean denoting whether bpf_flow_dissect returned BPF_OK was set into 'bpf_attr.test.retval'. Augment this, so users can check the actual return code of the dissector program under test. Existing prog_tests/flow_dissector*.c tests were correspondingly changed to check against each test's expected retval. Also, tests' resulting 'flow_keys' are verified only in case the expected retval is BPF_OK. This allows adding new tests that expect non BPF_OK. Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Reviewed-by: Stanislav Fomichev Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220821113519.116765-4-shmulik.ladkani@gmail.com --- .../selftests/bpf/prog_tests/flow_dissector.c | 23 +++++++++++++++++++++- .../bpf/prog_tests/flow_dissector_load_bytes.c | 2 +- 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 0c1661ea996e..8fa3c454995e 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -100,6 +100,7 @@ struct test { } pkt; struct bpf_flow_keys keys; __u32 flags; + __u32 retval; }; #define VLAN_HLEN 4 @@ -126,6 +127,7 @@ struct test tests[] = { .sport = 80, .dport = 8080, }, + .retval = BPF_OK, }, { .name = "ipv6", @@ -146,6 +148,7 @@ struct test tests[] = { .sport = 80, .dport = 8080, }, + .retval = BPF_OK, }, { .name = "802.1q-ipv4", @@ -168,6 +171,7 @@ struct test tests[] = { .sport = 80, .dport = 8080, }, + .retval = BPF_OK, }, { .name = "802.1ad-ipv6", @@ -191,6 +195,7 @@ struct test tests[] = { .sport = 80, .dport = 8080, }, + .retval = BPF_OK, }, { .name = "ipv4-frag", @@ -217,6 +222,7 @@ struct test tests[] = { .dport = 8080, }, .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .retval = BPF_OK, }, { .name = "ipv4-no-frag", @@ -239,6 +245,7 @@ struct test tests[] = { .is_frag = true, .is_first_frag = true, }, + .retval = BPF_OK, }, { .name = "ipv6-frag", @@ -265,6 +272,7 @@ struct test tests[] = { .dport = 8080, }, .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .retval = BPF_OK, }, { .name = "ipv6-no-frag", @@ -287,6 +295,7 @@ struct test tests[] = { .is_frag = true, .is_first_frag = true, }, + .retval = BPF_OK, }, { .name = "ipv6-flow-label", @@ -309,6 +318,7 @@ struct test tests[] = { .dport = 8080, .flow_label = __bpf_constant_htonl(0xbeeef), }, + .retval = 
BPF_OK, }, { .name = "ipv6-no-flow-label", @@ -331,6 +341,7 @@ struct test tests[] = { .flow_label = __bpf_constant_htonl(0xbeeef), }, .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .retval = BPF_OK, }, { .name = "ipip-encap", @@ -359,6 +370,7 @@ struct test tests[] = { .sport = 80, .dport = 8080, }, + .retval = BPF_OK, }, { .name = "ipip-no-encap", @@ -386,6 +398,7 @@ struct test tests[] = { .is_encap = true, }, .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .retval = BPF_OK, }, }; @@ -503,6 +516,10 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); + /* check the stored flow_keys only if BPF_OK expected */ + if (tests[i].retval != BPF_OK) + continue; + err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); ASSERT_OK(err, "bpf_map_lookup_elem"); @@ -588,7 +605,11 @@ void test_flow_dissector(void) err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(topts.retval, 1, "test_run retval"); + ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval"); + + /* check the resulting flow_keys only if BPF_OK returned */ + if (topts.retval != BPF_OK) + continue; ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), "test_run data_size_out"); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 36afb409c25f..c7a47b57ac91 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -44,7 +44,7 @@ void serial_test_flow_dissector_load_bytes(void) ASSERT_OK(err, "test_run"); ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), "test_run data_size_out"); - ASSERT_EQ(topts.retval, 1, "test_run retval"); + ASSERT_EQ(topts.retval, BPF_OK, "test_run retval"); if (fd >= -1) close(fd); -- cgit v1.2.3 From d6513727c2af39a8cffb0d9b07376e51a85f347f Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Sun, 21 Aug 2022 14:35:19 +0300 Subject: bpf, selftests: Test BPF_FLOW_DISSECTOR_CONTINUE The dissector program returns BPF_FLOW_DISSECTOR_CONTINUE (and avoids setting skb->flow_keys or last_dissection map) in case it encounters IP packets whose (outer) source address is 127.0.0.127. Additional test is added to prog_tests/flow_dissector.c which sets this address as test's pkk.iph.saddr, with the expected retval of BPF_FLOW_DISSECTOR_CONTINUE. Also, legacy test_flow_dissector.sh was similarly augmented. 
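Schematically, the augmented skb-mode check reduces to the following sketch,
relying on the retval propagation added in the previous patch (names as in
the selftest):

	/* With saddr 127.0.0.127 the dissector program is expected to
	 * decline and return BPF_FLOW_DISSECTOR_CONTINUE rather than
	 * BPF_OK, so the stored flow_keys are deliberately not checked.
	 */
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, BPF_FLOW_DISSECTOR_CONTINUE, "test_run retval");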
Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Reviewed-by: Stanislav Fomichev Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220821113519.116765-5-shmulik.ladkani@gmail.com --- .../selftests/bpf/prog_tests/flow_dissector.c | 21 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/bpf_flow.c | 15 +++++++++++++++ tools/testing/selftests/bpf/test_flow_dissector.sh | 8 ++++++++ 3 files changed, 44 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 8fa3c454995e..7acca37a3d2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -8,6 +8,8 @@ #include "bpf_flow.skel.h" +#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */ + #ifndef IP_MF #define IP_MF 0x2000 #endif @@ -400,6 +402,25 @@ struct test tests[] = { .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, .retval = BPF_OK, }, + { + .name = "ipip-encap-dissector-continue", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 99, + .tcp.dest = 9090, + }, + .retval = BPF_FLOW_DISSECTOR_CONTINUE, + }, }; static int create_tap(const char *ifname) diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index f266c757b3df..a20c5ed5e454 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -22,6 +22,8 @@ #define PROG(F) PROG_(F, _##F) #define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM +#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */ + /* These are the identifiers of the BPF programs that will be used in tail * calls. Name is limited to 16 characters, with the terminating character and * bpf_func_ above, we have only 6 to work with, anything after will be cropped. @@ -143,6 +145,19 @@ int _dissect(struct __sk_buff *skb) { struct bpf_flow_keys *keys = skb->flow_keys; + if (keys->n_proto == bpf_htons(ETH_P_IP)) { + /* IP traffic from FLOW_CONTINUE_SADDR falls-back to + * standard dissector + */ + struct iphdr *iph, _iph; + + iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph); + if (iph && iph->ihl == 5 && + iph->saddr == bpf_htonl(FLOW_CONTINUE_SADDR)) { + return BPF_FLOW_DISSECTOR_CONTINUE; + } + } + return parse_eth_proto(skb, keys->n_proto); } diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh index dbd91221727d..5303ce0c977b 100755 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ b/tools/testing/selftests/bpf/test_flow_dissector.sh @@ -115,6 +115,14 @@ tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ # Send 10 IPv4/UDP packets from port 10. Filter should not drop any. ./test_flow_dissector -i 4 -f 10 +echo "Testing IPv4 from 127.0.0.127 (fallback to generic dissector)..." +# Send 10 IPv4/UDP packets from port 8. Filter should not drop any. +./test_flow_dissector -i 4 -S 127.0.0.127 -f 8 +# Send 10 IPv4/UDP packets from port 9. Filter should drop all. +./test_flow_dissector -i 4 -S 127.0.0.127 -f 9 -F +# Send 10 IPv4/UDP packets from port 10. 
Filter should not drop any. +./test_flow_dissector -i 4 -S 127.0.0.127 -f 10 + echo "Testing IPIP..." # Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any. ./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ -- cgit v1.2.3 From 2172fb8007eaafbef18563afb6c1ae5a976bf787 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 23 Aug 2022 15:25:54 -0700 Subject: bpf: update bpf_{g,s}et_retval documentation * replace 'syscall' with 'upper layers', still mention that it's being exported via syscall errno * describe what happens in set_retval(-EPERM) + return 1 * describe what happens with bind's 'return 3' Acked-by: Martin KaFai Lau Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20220823222555.523590-5-sdf@google.com Signed-off-by: Alexei Starovoitov --- tools/include/uapi/linux/bpf.h | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index f38814fbb618..4fb685591035 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5085,17 +5085,29 @@ union bpf_attr { * * int bpf_get_retval(void) * Description - * Get the syscall's return value that will be returned to userspace. + * Get the BPF program's return value that will be returned to the upper layers. * - * This helper is currently supported by cgroup programs only. + * This helper is currently supported by cgroup programs and only by the hooks + * where BPF program's return value is returned to the userspace via errno. * Return - * The syscall's return value. + * The BPF program's return value. * * int bpf_set_retval(int retval) * Description - * Set the syscall's return value that will be returned to userspace. + * Set the BPF program's return value that will be returned to the upper layers. + * + * This helper is currently supported by cgroup programs and only by the hooks + * where BPF program's return value is returned to the userspace via errno. + * + * Note that there is the following corner case where the program exports an error + * via bpf_set_retval but signals success via 'return 1': + * + * bpf_set_retval(-EPERM); + * return 1; + * + * In this case, the BPF program's return value will use helper's -EPERM. This + * still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case. * - * This helper is currently supported by cgroup programs only. * Return * 0 on success, or a negative error in case of failure. * -- cgit v1.2.3 From e7215f574079ffb138258e8ebfa3f2bf5a4a1238 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 23 Aug 2022 15:25:55 -0700 Subject: selftests/bpf: Make sure bpf_{g,s}et_retval is exposed everywhere For each hook, have a simple bpf_set_retval(bpf_get_retval) program and make sure it loads for the hooks we want. 
The exceptions are the hooks which don't propagate the error to the callers: - sockops - recvmsg - getpeername - getsockname - cg_skb ingress and egress Acked-by: Martin KaFai Lau Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20220823222555.523590-6-sdf@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 1 + .../selftests/bpf/cgroup_getset_retval_hooks.h | 25 +++++++++++ .../bpf/prog_tests/cgroup_getset_retval.c | 48 ++++++++++++++++++++++ .../bpf/progs/cgroup_getset_retval_hooks.c | 16 ++++++++ 4 files changed, 90 insertions(+) create mode 100644 tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h create mode 100644 tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 8d59ec7f4c2d..eecad99f1735 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -323,6 +323,7 @@ $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h +$(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h # Build BPF object using Clang # $1 - input .c file diff --git a/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h b/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h new file mode 100644 index 000000000000..a525d3544fd7 --- /dev/null +++ b/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +BPF_RETVAL_HOOK(ingress, "cgroup_skb/ingress", __sk_buff, -EINVAL) +BPF_RETVAL_HOOK(egress, "cgroup_skb/egress", __sk_buff, -EINVAL) +BPF_RETVAL_HOOK(sock_create, "cgroup/sock_create", bpf_sock, 0) +BPF_RETVAL_HOOK(sock_ops, "sockops", bpf_sock_ops, -EINVAL) +BPF_RETVAL_HOOK(dev, "cgroup/dev", bpf_cgroup_dev_ctx, 0) +BPF_RETVAL_HOOK(bind4, "cgroup/bind4", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(bind6, "cgroup/bind6", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(connect4, "cgroup/connect4", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(connect6, "cgroup/connect6", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(post_bind4, "cgroup/post_bind4", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(post_bind6, "cgroup/post_bind6", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(sendmsg4, "cgroup/sendmsg4", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(sendmsg6, "cgroup/sendmsg6", bpf_sock_addr, 0) +BPF_RETVAL_HOOK(sysctl, "cgroup/sysctl", bpf_sysctl, 0) +BPF_RETVAL_HOOK(recvmsg4, "cgroup/recvmsg4", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(recvmsg6, "cgroup/recvmsg6", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(getsockopt, "cgroup/getsockopt", bpf_sockopt, 0) +BPF_RETVAL_HOOK(setsockopt, "cgroup/setsockopt", bpf_sockopt, 0) +BPF_RETVAL_HOOK(getpeername4, "cgroup/getpeername4", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(getpeername6, "cgroup/getpeername6", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(getsockname4, "cgroup/getsockname4", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(getsockname6, "cgroup/getsockname6", bpf_sock_addr, -EINVAL) +BPF_RETVAL_HOOK(sock_release, "cgroup/sock_release", bpf_sock, 0) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c index 0b47c3c000c7..4d2fa99273d8 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -10,6 +10,7 @@ #include "cgroup_getset_retval_setsockopt.skel.h" #include 
"cgroup_getset_retval_getsockopt.skel.h" +#include "cgroup_getset_retval_hooks.skel.h" #define SOL_CUSTOM 0xdeadbeef @@ -433,6 +434,50 @@ close_bpf_object: cgroup_getset_retval_getsockopt__destroy(obj); } +struct exposed_hook { + const char *name; + int expected_err; +} exposed_hooks[] = { + +#define BPF_RETVAL_HOOK(NAME, SECTION, CTX, EXPECTED_ERR) \ + { \ + .name = #NAME, \ + .expected_err = EXPECTED_ERR, \ + }, + +#include "cgroup_getset_retval_hooks.h" + +#undef BPF_RETVAL_HOOK +}; + +static void test_exposed_hooks(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_hooks *skel; + struct bpf_program *prog; + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(exposed_hooks); i++) { + skel = cgroup_getset_retval_hooks__open(); + if (!ASSERT_OK_PTR(skel, "cgroup_getset_retval_hooks__open")) + continue; + + prog = bpf_object__find_program_by_name(skel->obj, exposed_hooks[i].name); + if (!ASSERT_NEQ(prog, NULL, "bpf_object__find_program_by_name")) + goto close_skel; + + err = bpf_program__set_autoload(prog, true); + if (!ASSERT_OK(err, "bpf_program__set_autoload")) + goto close_skel; + + err = cgroup_getset_retval_hooks__load(skel); + ASSERT_EQ(err, exposed_hooks[i].expected_err, "expected_err"); + +close_skel: + cgroup_getset_retval_hooks__destroy(skel); + } +} + void test_cgroup_getset_retval(void) { int cgroup_fd = -1; @@ -476,6 +521,9 @@ void test_cgroup_getset_retval(void) if (test__start_subtest("getsockopt-retval_sync")) test_getsockopt_retval_sync(cgroup_fd, sock_fd); + if (test__start_subtest("exposed_hooks")) + test_exposed_hooks(cgroup_fd, sock_fd); + close_fd: close(cgroup_fd); } diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c new file mode 100644 index 000000000000..13dfb4bbfd28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#define BPF_RETVAL_HOOK(name, section, ctx, expected_err) \ + __attribute__((__section__("?" section))) \ + int name(struct ctx *_ctx) \ + { \ + bpf_set_retval(bpf_get_retval()); \ + return 1; \ + } + +#include "cgroup_getset_retval_hooks.h" + +#undef BPF_RETVAL_HOOK -- cgit v1.2.3 From 35f14dbd2fc6619dea8ac9eea18976378b18450b Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Tue, 23 Aug 2022 03:32:26 +0200 Subject: selftests/bpf: Add tests for reference state fixes for callbacks These are regression tests to ensure we don't end up in invalid runtime state for helpers that execute callbacks multiple times. It exercises the fixes to verifier callback handling for reference state in previous patches. 
Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20220823013226.24988-1-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/cb_refs.c | 48 ++++++++++ tools/testing/selftests/bpf/progs/cb_refs.c | 116 +++++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/cb_refs.c create mode 100644 tools/testing/selftests/bpf/progs/cb_refs.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c new file mode 100644 index 000000000000..3bff680de16c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "bpf/libbpf.h" +#include +#include + +#include "cb_refs.skel.h" + +static char log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} cb_refs_tests[] = { + { "underflow_prog", "reference has not been acquired before" }, + { "leak_prog", "Unreleased reference" }, + { "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */ + { "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */ +}; + +void test_cb_refs(void) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct bpf_program *prog; + struct cb_refs *skel; + int i; + + for (i = 0; i < ARRAY_SIZE(cb_refs_tests); i++) { + LIBBPF_OPTS(bpf_test_run_opts, run_opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + skel = cb_refs__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "cb_refs__open_and_load")) + return; + prog = bpf_object__find_program_by_name(skel->obj, cb_refs_tests[i].prog_name); + bpf_program__set_autoload(prog, true); + if (!ASSERT_ERR(cb_refs__load(skel), "cb_refs__load")) + bpf_prog_test_run_opts(bpf_program__fd(prog), &run_opts); + if (!ASSERT_OK_PTR(strstr(log_buf, cb_refs_tests[i].err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", cb_refs_tests[i].err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + cb_refs__destroy(skel); + } +} diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c new file mode 100644 index 000000000000..7653df1bc787 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +struct map_value { + struct prog_test_ref_kfunc __kptr_ref *ptr; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 16); +} array_map SEC(".maps"); + +extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; + +static __noinline int cb1(void *map, void *key, void *value, void *ctx) +{ + void *p = *(void **)ctx; + bpf_kfunc_call_test_release(p); + /* Without the fix this would cause underflow */ + return 0; +} + +SEC("?tc") +int underflow_prog(void *ctx) +{ + struct prog_test_ref_kfunc *p; + unsigned long sl = 0; + + p = bpf_kfunc_call_test_acquire(&sl); + if (!p) + return 0; + bpf_for_each_map_elem(&array_map, cb1, &p, 0); + return 0; +} + +static __always_inline int cb2(void *map, void *key, void *value, void *ctx) +{ + unsigned long sl = 0; + + *(void **)ctx = bpf_kfunc_call_test_acquire(&sl); + /* Without the 
fix this would leak memory */ + return 0; +} + +SEC("?tc") +int leak_prog(void *ctx) +{ + struct prog_test_ref_kfunc *p; + struct map_value *v; + unsigned long sl; + + v = bpf_map_lookup_elem(&array_map, &(int){0}); + if (!v) + return 0; + + p = NULL; + bpf_for_each_map_elem(&array_map, cb2, &p, 0); + p = bpf_kptr_xchg(&v->ptr, p); + if (p) + bpf_kfunc_call_test_release(p); + return 0; +} + +static __always_inline int cb(void *map, void *key, void *value, void *ctx) +{ + return 0; +} + +static __always_inline int cb3(void *map, void *key, void *value, void *ctx) +{ + unsigned long sl = 0; + void *p; + + bpf_kfunc_call_test_acquire(&sl); + bpf_for_each_map_elem(&array_map, cb, &p, 0); + /* It should only complain here, not in cb. This is why we need + * callback_ref to be set to frameno. + */ + return 0; +} + +SEC("?tc") +int nested_cb(void *ctx) +{ + struct prog_test_ref_kfunc *p; + unsigned long sl = 0; + int sp = 0; + + p = bpf_kfunc_call_test_acquire(&sl); + if (!p) + return 0; + bpf_for_each_map_elem(&array_map, cb3, &sp, 0); + bpf_kfunc_call_test_release(p); + return 0; +} + +SEC("?tc") +int non_cb_transfer_ref(void *ctx) +{ + struct prog_test_ref_kfunc *p; + unsigned long sl = 0; + + p = bpf_kfunc_call_test_acquire(&sl); + if (!p) + return 0; + cb1(NULL, NULL, NULL, &p); + bpf_kfunc_call_test_acquire(&sl); + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From b03914f7ff7bc5aca056aaa49fd3ff9120d24f47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20M=C3=BCller?= Date: Wed, 24 Aug 2022 16:39:06 +0000 Subject: selftests/bpf: Add cb_refs test to s390x deny list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cb_refs BPF selftest is failing execution on s390x machines. This is a newly added test that requires a feature not presently supported on this architecture. Denylist the test for this architecture. Fixes: 3cf7e7d8685c ("selftests/bpf: Add tests for reference state fixes for callbacks") Signed-off-by: Daniel Müller Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220824163906.1186832-1-deso@posteo.net Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index a708c3dcc154..37bafcbf952a 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -66,3 +66,4 @@ select_reuseport # intermittently fails on new s390x set xdp_synproxy # JIT does not support calling kernel function (kfunc) unpriv_bpf_disabled # fentry setget_sockopt # attach unexpected error: -524 (trampoline) +cb_refs # expected error message unexpected error: -524 (trampoline) -- cgit v1.2.3 From 7e165d1939284d0bf16a83c591c3c5d24a110d0a Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Wed, 24 Aug 2022 09:39:07 +0800 Subject: selftests/bpf: Fix wrong size passed to bpf_setsockopt() sizeof(new_cc) is not real memory size that new_cc points to; introduce a new_cc_len to store the size and then pass it to bpf_setsockopt(). 
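The pitfall in miniature (illustrative sketch, not part of the patch):

	static const char cubic_cc[] = "cubic";	/* sizeof(cubic_cc) == 6 */
	const char *new_cc = cubic_cc;

	/* sizeof(new_cc) == sizeof(const char *), i.e. 8 on 64-bit, not
	 * the 6 bytes of "cubic", so bpf_setsockopt() was handed an
	 * optlen larger than the buffer new_cc points to. The fix records
	 * sizeof(cubic_cc) or sizeof(reno_cc) in new_cc_len and passes
	 * that instead.
	 */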
Fixes: 31123c0360e0 ("selftests/bpf: bpf_setsockopt tests") Signed-off-by: Yang Yingliang Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220824013907.380448-1-yangyingliang@huawei.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/setget_sockopt.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 4a4cb44a4a15..40606ef47a38 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -305,15 +305,19 @@ static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc) if (t->opt == TCP_CONGESTION) { char old_cc[16], tmp_cc[16]; const char *new_cc; + int new_cc_len; if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc))) return 1; - if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) + if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) { new_cc = reno_cc; - else + new_cc_len = sizeof(reno_cc); + } else { new_cc = cubic_cc; + new_cc_len = sizeof(cubic_cc); + } if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, (void *)new_cc, - sizeof(new_cc))) + new_cc_len)) return 1; if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, tmp_cc, sizeof(tmp_cc))) return 1; -- cgit v1.2.3 From d4ccaf58a8472123ac97e6db03932c375b5c45ba Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Wed, 24 Aug 2022 16:31:13 -0700 Subject: bpf: Introduce cgroup iter Cgroup_iter is a type of bpf_iter. It walks over cgroups in four modes: - walking a cgroup's descendants in pre-order. - walking a cgroup's descendants in post-order. - walking a cgroup's ancestors. - process only the given cgroup. When attaching cgroup_iter, one can set a cgroup to the iter_link created from attaching. This cgroup is passed as a file descriptor or cgroup id and serves as the starting point of the walk. If no cgroup is specified, the starting point will be the root cgroup v2. For walking descendants, one can specify the order: either pre-order or post-order. For walking ancestors, the walk starts at the specified cgroup and ends at the root. One can also terminate the walk early by returning 1 from the iter program. Note that because walking cgroup hierarchy holds cgroup_mutex, the iter program is called with cgroup_mutex held. Currently only one session is supported, which means, depending on the volume of data bpf program intends to send to user space, the number of cgroups that can be walked is limited. For example, given the current buffer size is 8 * PAGE_SIZE, if the program sends 64B data for each cgroup, assuming PAGE_SIZE is 4kb, the total number of cgroups that can be walked is 512. This is a limitation of cgroup_iter. If the output data is larger than the kernel buffer size, after all data in the kernel buffer is consumed by user space, the subsequent read() syscall will signal EOPNOTSUPP. In order to work around, the user may have to update their program to reduce the volume of data sent to output. For example, skip some uninteresting cgroups. In future, we may extend bpf_iter flags to allow customizing buffer size. 
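For orientation, attaching and parameterizing the iterator from user space
is expected to look roughly like this (a sketch based on the
bpf_iter_link_info fields added below; prog is a SEC("iter/cgroup") program
loaded with libbpf):

	union bpf_iter_link_info linfo = {};
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_link *link;

	linfo.cgroup.cgroup_fd = cgroup_fd;		/* start of the walk */
	linfo.cgroup.order = BPF_ITER_DESCENDANTS_PRE;	/* pre-order */
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);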
Acked-by: Yonghong Song Acked-by: Tejun Heo Signed-off-by: Hao Luo Link: https://lore.kernel.org/r/20220824233117.1312810-2-haoluo@google.com Signed-off-by: Alexei Starovoitov --- tools/include/uapi/linux/bpf.h | 30 +++++++++++++++++++++++ tools/testing/selftests/bpf/prog_tests/btf_dump.c | 4 +-- 2 files changed, 32 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4fb685591035..5056cef2112f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -87,10 +87,29 @@ struct bpf_cgroup_storage_key { __u32 attach_type; /* program attach type (enum bpf_attach_type) */ }; +enum bpf_cgroup_iter_order { + BPF_ITER_ORDER_UNSPEC = 0, + BPF_ITER_SELF_ONLY, /* process only a single object. */ + BPF_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */ + BPF_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */ + BPF_ITER_ANCESTORS_UP, /* walk ancestors upward. */ +}; + union bpf_iter_link_info { struct { __u32 map_fd; } map; + struct { + enum bpf_cgroup_iter_order order; + + /* At most one of cgroup_fd and cgroup_id can be non-zero. If + * both are zero, the walk starts from the default cgroup v2 + * root. For walking v1 hierarchy, one should always explicitly + * specify cgroup_fd. + */ + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; }; /* BPF syscall commands, see bpf(2) man-page for more details. */ @@ -6176,11 +6195,22 @@ struct bpf_link_info { struct { __aligned_u64 target_name; /* in/out: target_name buffer ptr */ __u32 target_name_len; /* in/out: target_name buffer len */ + + /* If the iter specific field is 32 bits, it can be put + * in the first or second union. Otherwise it should be + * put in the second union. + */ union { struct { __u32 map_id; } map; }; + union { + struct { + __u64 cgroup_id; + __u32 order; + } cgroup; + }; } iter; struct { __u32 netns_ino; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 5fce7008d1ff..84c1cfaa2b02 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -764,8 +764,8 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, /* union with nested struct */ TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT, - "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},}", - { .map = { .map_fd = 1 }}); + "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (__u32)1,.cgroup_fd = (__u32)1,},}", + { .cgroup = { .order = 1, .cgroup_fd = 1, }}); /* struct skb with nested structs/unions; because type output is so * complex, we don't do a string comparison, just verify we return -- cgit v1.2.3 From fe0dd9d4b7402c9773fc7a453fa65875abaa24ec Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Wed, 24 Aug 2022 16:31:14 -0700 Subject: selftests/bpf: Test cgroup_iter. Add a selftest for cgroup_iter. The selftest creates a mini cgroup tree of the following structure: ROOT (working cgroup) | PARENT / \ CHILD1 CHILD2 and tests the following scenarios: - invalid cgroup fd. - pre-order walk over descendants from PARENT. - post-order walk over descendants from PARENT. - walk of ancestors from PARENT. - process only a single object (i.e. PARENT). - early termination. 
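For example, the pre-order walk rooted at PARENT is expected to produce,
schematically, the following stream (with the prologue/epilogue markers
emitted by the iter program):

	prologue
	<id of PARENT>
	<id of CHILD1>
	<id of CHILD2>
	epilogue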
Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Signed-off-by: Hao Luo Link: https://lore.kernel.org/r/20220824233117.1312810-3-haoluo@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 2 +- .../testing/selftests/bpf/prog_tests/cgroup_iter.c | 224 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/bpf_iter.h | 7 + tools/testing/selftests/bpf/progs/cgroup_iter.c | 39 ++++ 4 files changed, 271 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/cgroup_iter.c create mode 100644 tools/testing/selftests/bpf/progs/cgroup_iter.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 84c1cfaa2b02..a1bae92be1fc 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -764,7 +764,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, /* union with nested struct */ TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT, - "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (__u32)1,.cgroup_fd = (__u32)1,},}", + "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},}", { .cgroup = { .order = 1, .cgroup_fd = 1, }}); /* struct skb with nested structs/unions; because type output is so diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c new file mode 100644 index 000000000000..38958c37b9ce --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ + +#include +#include +#include +#include "cgroup_iter.skel.h" +#include "cgroup_helpers.h" + +#define ROOT 0 +#define PARENT 1 +#define CHILD1 2 +#define CHILD2 3 +#define NUM_CGROUPS 4 + +#define PROLOGUE "prologue\n" +#define EPILOGUE "epilogue\n" + +static const char *cg_path[] = { + "/", "/parent", "/parent/child1", "/parent/child2" +}; + +static int cg_fd[] = {-1, -1, -1, -1}; +static unsigned long long cg_id[] = {0, 0, 0, 0}; +static char expected_output[64]; + +static int setup_cgroups(void) +{ + int fd, i = 0; + + for (i = 0; i < NUM_CGROUPS; i++) { + fd = create_and_get_cgroup(cg_path[i]); + if (fd < 0) + return fd; + + cg_fd[i] = fd; + cg_id[i] = get_cgroup_id(cg_path[i]); + } + return 0; +} + +static void cleanup_cgroups(void) +{ + int i; + + for (i = 0; i < NUM_CGROUPS; i++) + close(cg_fd[i]); +} + +static void read_from_cgroup_iter(struct bpf_program *prog, int cgroup_fd, + int order, const char *testname) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + int len, iter_fd; + static char buf[128]; + size_t left; + char *p; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = cgroup_fd; + linfo.cgroup.order = order; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(prog, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (iter_fd < 0) + goto free_link; + + memset(buf, 0, sizeof(buf)); + left = ARRAY_SIZE(buf); + p = buf; + while ((len = read(iter_fd, p, left)) > 0) { + p += len; + left -= len; + } + + ASSERT_STREQ(buf, expected_output, testname); + + /* 
read() after iter finishes should be ok. */ + if (len == 0) + ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read"); + + close(iter_fd); +free_link: + bpf_link__destroy(link); +} + +/* Invalid cgroup. */ +static void test_invalid_cgroup(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = (__u32)-1; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + ASSERT_ERR_PTR(link, "attach_iter"); + bpf_link__destroy(link); +} + +/* Specifying both cgroup_fd and cgroup_id is invalid. */ +static void test_invalid_cgroup_spec(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = (__u32)cg_fd[PARENT]; + linfo.cgroup.cgroup_id = (__u64)cg_id[PARENT]; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + ASSERT_ERR_PTR(link, "attach_iter"); + bpf_link__destroy(link); +} + +/* Preorder walk prints parent and child in order. */ +static void test_walk_preorder(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE, + cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_ITER_DESCENDANTS_PRE, "preorder"); +} + +/* Postorder walk prints child and parent in order. */ +static void test_walk_postorder(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE, + cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_ITER_DESCENDANTS_POST, "postorder"); +} + +/* Walking parents prints parent and then root. */ +static void test_walk_ancestors_up(struct cgroup_iter *skel) +{ + /* terminate the walk when ROOT is met. */ + skel->bss->terminal_cgroup = cg_id[ROOT]; + + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n" EPILOGUE, + cg_id[PARENT], cg_id[ROOT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_ITER_ANCESTORS_UP, "ancestors_up"); + + skel->bss->terminal_cgroup = 0; +} + +/* Early termination prints parent only. */ +static void test_early_termination(struct cgroup_iter *skel) +{ + /* terminate the walk after the first element is processed. */ + skel->bss->terminate_early = 1; + + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_ITER_DESCENDANTS_PRE, "early_termination"); + + skel->bss->terminate_early = 0; +} + +/* Waling self prints self only. 
*/ +static void test_walk_self_only(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_ITER_SELF_ONLY, "self_only"); +} + +void test_cgroup_iter(void) +{ + struct cgroup_iter *skel = NULL; + + if (setup_cgroup_environment()) + return; + + if (setup_cgroups()) + goto out; + + skel = cgroup_iter__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_iter__open_and_load")) + goto out; + + if (test__start_subtest("cgroup_iter__invalid_cgroup")) + test_invalid_cgroup(skel); + if (test__start_subtest("cgroup_iter__invalid_cgroup_spec")) + test_invalid_cgroup_spec(skel); + if (test__start_subtest("cgroup_iter__preorder")) + test_walk_preorder(skel); + if (test__start_subtest("cgroup_iter__postorder")) + test_walk_postorder(skel); + if (test__start_subtest("cgroup_iter__ancestors_up_walk")) + test_walk_ancestors_up(skel); + if (test__start_subtest("cgroup_iter__early_termination")) + test_early_termination(skel); + if (test__start_subtest("cgroup_iter__self_only")) + test_walk_self_only(skel); +out: + cgroup_iter__destroy(skel); + cleanup_cgroups(); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h index e9846606690d..c41ee80533ca 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -17,6 +17,7 @@ #define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used #define bpf_iter__sockmap bpf_iter__sockmap___not_used #define bpf_iter__bpf_link bpf_iter__bpf_link___not_used +#define bpf_iter__cgroup bpf_iter__cgroup___not_used #define btf_ptr btf_ptr___not_used #define BTF_F_COMPACT BTF_F_COMPACT___not_used #define BTF_F_NONAME BTF_F_NONAME___not_used @@ -40,6 +41,7 @@ #undef bpf_iter__bpf_sk_storage_map #undef bpf_iter__sockmap #undef bpf_iter__bpf_link +#undef bpf_iter__cgroup #undef btf_ptr #undef BTF_F_COMPACT #undef BTF_F_NONAME @@ -141,6 +143,11 @@ struct bpf_iter__bpf_link { struct bpf_link *link; }; +struct bpf_iter__cgroup { + struct bpf_iter_meta *meta; + struct cgroup *cgroup; +} __attribute__((preserve_access_index)); + struct btf_ptr { void *ptr; __u32 type_id; diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter.c b/tools/testing/selftests/bpf/progs/cgroup_iter.c new file mode 100644 index 000000000000..de03997322a7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_iter.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ + +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; +int terminate_early = 0; +u64 terminal_cgroup = 0; + +static inline u64 cgroup_id(struct cgroup *cgrp) +{ + return cgrp->kn->id; +} + +SEC("iter/cgroup") +int cgroup_id_printer(struct bpf_iter__cgroup *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct cgroup *cgrp = ctx->cgroup; + + /* epilogue */ + if (cgrp == NULL) { + BPF_SEQ_PRINTF(seq, "epilogue\n"); + return 0; + } + + /* prologue */ + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(seq, "prologue\n"); + + BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp)); + + if (terminal_cgroup == cgroup_id(cgrp)) + return 1; + + return terminate_early ? 
1 : 0;
+}
-- cgit v1.2.3


From 434992bb603773c94465c7e68331e68424bdc9eb Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Wed, 24 Aug 2022 16:31:16 -0700
Subject: selftests/bpf: extend cgroup helpers

This patch extends the bpf selftest cgroup_helpers in various ways:
- Add enable_controllers() that allows tests to enable all or a subset of controllers for a specific cgroup.
- Add join_cgroup_parent(). The cgroup workdir is based on the pid, therefore a spawned child cannot join the same cgroup hierarchy of the test through join_cgroup(). join_cgroup_parent() is used in child processes to join a cgroup under the parent's workdir.
- Add write_cgroup_file() and write_cgroup_file_parent() (similar to join_cgroup_parent() above).
- Add get_root_cgroup() for tests that need to do checks on the root cgroup.
- Distinguish relative and absolute cgroup paths in function arguments. Now relative paths are called relative_path, and absolute paths are called cgroup_path.

Signed-off-by: Yosry Ahmed
Signed-off-by: Hao Luo
Link: https://lore.kernel.org/r/20220824233117.1312810-5-haoluo@google.com
Signed-off-by: Alexei Starovoitov
---
 tools/testing/selftests/bpf/cgroup_helpers.c | 202 +++++++++++++++++++++------
 tools/testing/selftests/bpf/cgroup_helpers.h | 19 ++-
 2 files changed, 174 insertions(+), 47 deletions(-)

(limited to 'tools')

diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 9d59c3990ca8..e914cc45b766 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -33,49 +33,52 @@
 #define CGROUP_MOUNT_DFLT "/sys/fs/cgroup"
 #define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls"
 #define CGROUP_WORK_DIR "/cgroup-test-work-dir"
-#define format_cgroup_path(buf, path) \
+
+#define format_cgroup_path_pid(buf, path, pid) \
 snprintf(buf, sizeof(buf), "%s%s%d%s", CGROUP_MOUNT_PATH, \
- CGROUP_WORK_DIR, getpid(), path)
+ CGROUP_WORK_DIR, pid, path)
+
+#define format_cgroup_path(buf, path) \
+ format_cgroup_path_pid(buf, path, getpid())
+
+#define format_parent_cgroup_path(buf, path) \
+ format_cgroup_path_pid(buf, path, getppid())

 #define format_classid_path(buf) \
 snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
 CGROUP_WORK_DIR)

-/**
- * enable_all_controllers() - Enable all available cgroup v2 controllers
- *
- * Enable all available cgroup v2 controllers in order to increase
- * the code coverage.
- *
- * If successful, 0 is returned.
- */
-static int enable_all_controllers(char *cgroup_path)
+static int __enable_controllers(const char *cgroup_path, const char *controllers)
 {
 char path[PATH_MAX + 1];
- char buf[PATH_MAX];
+ char enable[PATH_MAX + 1];
 char *c, *c2;
 int fd, cfd;
 ssize_t len;

- snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path);
- fd = open(path, O_RDONLY);
- if (fd < 0) {
- log_err("Opening cgroup.controllers: %s", path);
- return 1;
- }
-
- len = read(fd, buf, sizeof(buf) - 1);
- if (len < 0) {
+ /* If no controllers are passed, enable all available controllers */
+ if (!controllers) {
+ snprintf(path, sizeof(path), "%s/cgroup.controllers",
+ cgroup_path);
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ log_err("Opening cgroup.controllers: %s", path);
+ return 1;
+ }
+ len = read(fd, enable, sizeof(enable) - 1);
+ if (len < 0) {
+ close(fd);
+ log_err("Reading cgroup.controllers: %s", path);
+ return 1;
+ } else if (len == 0) { /* No controllers to enable */
+ close(fd);
+ return 0;
+ }
+ enable[len] = 0;
 close(fd);
- log_err("Reading cgroup.controllers: %s", path);
- return 1;
+ } else {
+ strncpy(enable, controllers, sizeof(enable));
 }
- buf[len] = 0;
- close(fd);
-
- /* No controllers available? We're probably on cgroup v1. */
- if (len == 0)
- return 0;

 snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
 cfd = open(path, O_RDWR);
@@ -84,7 +87,7 @@ static int enable_all_controllers(char *cgroup_path)
 return 1;
 }

- for (c = strtok_r(buf, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) {
+ for (c = strtok_r(enable, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) {
 if (dprintf(cfd, "+%s\n", c) <= 0) {
 log_err("Enabling controller %s: %s", c, path);
 close(cfd);
@@ -95,6 +98,87 @@ static int enable_all_controllers(char *cgroup_path)
 return 0;
 }

+/**
+ * enable_controllers() - Enable cgroup v2 controllers
+ * @relative_path: The cgroup path, relative to the workdir
+ * @controllers: List of controllers to enable in cgroup.controllers format
+ *
+ *
+ * Enable given cgroup v2 controllers, if @controllers is NULL, enable all
+ * available controllers.
+ *
+ * If successful, 0 is returned.
+ */
+int enable_controllers(const char *relative_path, const char *controllers)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ return __enable_controllers(cgroup_path, controllers);
+}
+
+static int __write_cgroup_file(const char *cgroup_path, const char *file,
+ const char *buf)
+{
+ char file_path[PATH_MAX + 1];
+ int fd;
+
+ snprintf(file_path, sizeof(file_path), "%s/%s", cgroup_path, file);
+ fd = open(file_path, O_RDWR);
+ if (fd < 0) {
+ log_err("Opening %s", file_path);
+ return 1;
+ }
+
+ if (dprintf(fd, "%s", buf) <= 0) {
+ log_err("Writing to %s", file_path);
+ close(fd);
+ return 1;
+ }
+ close(fd);
+ return 0;
+}
+
+/**
+ * write_cgroup_file() - Write to a cgroup file
+ * @relative_path: The cgroup path, relative to the workdir
+ * @file: The name of the file in cgroupfs to write to
+ * @buf: Buffer to write to the file
+ *
+ * Write to a file in the given cgroup's directory.
+ *
+ * If successful, 0 is returned.
+ */ +int write_cgroup_file(const char *relative_path, const char *file, + const char *buf) +{ + char cgroup_path[PATH_MAX - 24]; + + format_cgroup_path(cgroup_path, relative_path); + return __write_cgroup_file(cgroup_path, file, buf); +} + +/** + * write_cgroup_file_parent() - Write to a cgroup file in the parent process + * workdir + * @relative_path: The cgroup path, relative to the parent process workdir + * @file: The name of the file in cgroupfs to write to + * @buf: Buffer to write to the file + * + * Write to a file in the given cgroup's directory under the parent process + * workdir. + * + * If successful, 0 is returned. + */ +int write_cgroup_file_parent(const char *relative_path, const char *file, + const char *buf) +{ + char cgroup_path[PATH_MAX - 24]; + + format_parent_cgroup_path(cgroup_path, relative_path); + return __write_cgroup_file(cgroup_path, file, buf); +} + /** * setup_cgroup_environment() - Setup the cgroup environment * @@ -133,7 +217,9 @@ int setup_cgroup_environment(void) return 1; } - if (enable_all_controllers(cgroup_workdir)) + /* Enable all available controllers to increase test coverage */ + if (__enable_controllers(CGROUP_MOUNT_PATH, NULL) || + __enable_controllers(cgroup_workdir, NULL)) return 1; return 0; @@ -173,7 +259,7 @@ static int join_cgroup_from_top(const char *cgroup_path) /** * join_cgroup() - Join a cgroup - * @path: The cgroup path, relative to the workdir, to join + * @relative_path: The cgroup path, relative to the workdir, to join * * This function expects a cgroup to already be created, relative to the cgroup * work dir, and it joins it. For example, passing "/my-cgroup" as the path @@ -182,11 +268,27 @@ static int join_cgroup_from_top(const char *cgroup_path) * * On success, it returns 0, otherwise on failure it returns 1. */ -int join_cgroup(const char *path) +int join_cgroup(const char *relative_path) +{ + char cgroup_path[PATH_MAX + 1]; + + format_cgroup_path(cgroup_path, relative_path); + return join_cgroup_from_top(cgroup_path); +} + +/** + * join_parent_cgroup() - Join a cgroup in the parent process workdir + * @relative_path: The cgroup path, relative to parent process workdir, to join + * + * See join_cgroup(). + * + * On success, it returns 0, otherwise on failure it returns 1. + */ +int join_parent_cgroup(const char *relative_path) { char cgroup_path[PATH_MAX + 1]; - format_cgroup_path(cgroup_path, path); + format_parent_cgroup_path(cgroup_path, relative_path); return join_cgroup_from_top(cgroup_path); } @@ -212,9 +314,27 @@ void cleanup_cgroup_environment(void) nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT); } +/** + * get_root_cgroup() - Get the FD of the root cgroup + * + * On success, it returns the file descriptor. On failure, it returns -1. + * If there is a failure, it prints the error to stderr. + */ +int get_root_cgroup(void) +{ + int fd; + + fd = open(CGROUP_MOUNT_PATH, O_RDONLY); + if (fd < 0) { + log_err("Opening root cgroup"); + return -1; + } + return fd; +} + /** * create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD - * @path: The cgroup path, relative to the workdir, to join + * @relative_path: The cgroup path, relative to the workdir, to join * * This function creates a cgroup under the top level workdir and returns the * file descriptor. It is idempotent. @@ -222,14 +342,14 @@ void cleanup_cgroup_environment(void) * On success, it returns the file descriptor. On failure it returns -1. * If there is a failure, it prints the error to stderr. 
*/ -int create_and_get_cgroup(const char *path) +int create_and_get_cgroup(const char *relative_path) { char cgroup_path[PATH_MAX + 1]; int fd; - format_cgroup_path(cgroup_path, path); + format_cgroup_path(cgroup_path, relative_path); if (mkdir(cgroup_path, 0777) && errno != EEXIST) { - log_err("mkdiring cgroup %s .. %s", path, cgroup_path); + log_err("mkdiring cgroup %s .. %s", relative_path, cgroup_path); return -1; } @@ -244,13 +364,13 @@ int create_and_get_cgroup(const char *path) /** * get_cgroup_id() - Get cgroup id for a particular cgroup path - * @path: The cgroup path, relative to the workdir, to join + * @relative_path: The cgroup path, relative to the workdir, to join * * On success, it returns the cgroup id. On failure it returns 0, * which is an invalid cgroup id. * If there is a failure, it prints the error to stderr. */ -unsigned long long get_cgroup_id(const char *path) +unsigned long long get_cgroup_id(const char *relative_path) { int dirfd, err, flags, mount_id, fhsize; union { @@ -261,7 +381,7 @@ unsigned long long get_cgroup_id(const char *path) struct file_handle *fhp, *fhp2; unsigned long long ret = 0; - format_cgroup_path(cgroup_workdir, path); + format_cgroup_path(cgroup_workdir, relative_path); dirfd = AT_FDCWD; flags = 0; diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index fcc9cb91b211..3358734356ab 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -10,11 +10,18 @@ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) /* cgroupv2 related */ -int cgroup_setup_and_join(const char *path); -int create_and_get_cgroup(const char *path); -unsigned long long get_cgroup_id(const char *path); - -int join_cgroup(const char *path); +int enable_controllers(const char *relative_path, const char *controllers); +int write_cgroup_file(const char *relative_path, const char *file, + const char *buf); +int write_cgroup_file_parent(const char *relative_path, const char *file, + const char *buf); +int cgroup_setup_and_join(const char *relative_path); +int get_root_cgroup(void); +int create_and_get_cgroup(const char *relative_path); +unsigned long long get_cgroup_id(const char *relative_path); + +int join_cgroup(const char *relative_path); +int join_parent_cgroup(const char *relative_path); int setup_cgroup_environment(void); void cleanup_cgroup_environment(void); @@ -26,4 +33,4 @@ int join_classid(void); int setup_classid_environment(void); void cleanup_classid_environment(void); -#endif /* __CGROUP_HELPERS_H */ \ No newline at end of file +#endif /* __CGROUP_HELPERS_H */ -- cgit v1.2.3 From 88886309d2e82afcaa86fc302c2ba25d9e47cbc8 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 24 Aug 2022 16:31:17 -0700 Subject: selftests/bpf: add a selftest for cgroup hierarchical stats collection Add a selftest that tests the whole workflow for collecting, aggregating (flushing), and displaying cgroup hierarchical stats. TL;DR: - Userspace program creates a cgroup hierarchy and induces memcg reclaim in parts of it. - Whenever reclaim happens, vmscan_start and vmscan_end update per-cgroup percpu readings, and tell rstat which (cgroup, cpu) pairs have updates. - When userspace tries to read the stats, vmscan_dump calls rstat to flush the stats, and outputs the stats in text format to userspace (similar to cgroupfs stats). - rstat calls vmscan_flush once for every (cgroup, cpu) pair that has updates, vmscan_flush aggregates cpu readings and propagates updates to parents. 
- Userspace program makes sure the stats are aggregated and read correctly. Detailed explanation: - The test loads tracing bpf programs, vmscan_start and vmscan_end, to measure the latency of cgroup reclaim. Per-cgroup readings are stored in percpu maps for efficiency. When a cgroup reading is updated on a cpu, cgroup_rstat_updated(cgroup, cpu) is called to add the cgroup to the rstat updated tree on that cpu. - A cgroup_iter program, vmscan_dump, is loaded and pinned to a file, for each cgroup. Reading this file invokes the program, which calls cgroup_rstat_flush(cgroup) to ask rstat to propagate the updates for all cpus and cgroups that have updates in this cgroup's subtree. Afterwards, the stats are exposed to the user. vmscan_dump returns 1 to terminate iteration early, so that we only expose stats for one cgroup per read. - An ftrace program, vmscan_flush, is also loaded and attached to bpf_rstat_flush. When rstat flushing is ongoing, vmscan_flush is invoked once for each (cgroup, cpu) pair that has updates. cgroups are popped from the rstat tree in a bottom-up fashion, so calls will always be made for cgroups that have updates before their parents. The program aggregates percpu readings to a total per-cgroup reading, and also propagates them to the parent cgroup. After rstat flushing is over, all cgroups will have correct updated hierarchical readings (including all cpus and all their descendants). - Finally, the test creates a cgroup hierarchy and induces memcg reclaim in parts of it, and makes sure that the stats collection, aggregation, and reading workflow works as expected. Signed-off-by: Yosry Ahmed Signed-off-by: Hao Luo Link: https://lore.kernel.org/r/20220824233117.1312810-6-haoluo@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + .../bpf/prog_tests/cgroup_hierarchical_stats.c | 357 +++++++++++++++++++++ .../bpf/progs/cgroup_hierarchical_stats.c | 226 +++++++++++++ 3 files changed, 584 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c create mode 100644 tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 37bafcbf952a..736b65f61022 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -67,3 +67,4 @@ xdp_synproxy # JIT does not support calling kernel f unpriv_bpf_disabled # fentry setget_sockopt # attach unexpected error: -524 (trampoline) cb_refs # expected error message unexpected error: -524 (trampoline) +cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c new file mode 100644 index 000000000000..101a6d70b863 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Functions to manage eBPF programs attached to cgroup subsystems + * + * Copyright 2022 Google LLC. 
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "cgroup_helpers.h"
+#include "cgroup_hierarchical_stats.skel.h"
+
+#define PAGE_SIZE 4096
+#define MB(x) (x << 20)
+
+#define BPFFS_ROOT "/sys/fs/bpf/"
+#define BPFFS_VMSCAN BPFFS_ROOT"vmscan/"
+
+#define CG_ROOT_NAME "root"
+#define CG_ROOT_ID 1
+
+#define CGROUP_PATH(p, n) {.path = p"/"n, .name = n}
+
+static struct {
+ const char *path, *name;
+ unsigned long long id;
+ int fd;
+} cgroups[] = {
+ CGROUP_PATH("/", "test"),
+ CGROUP_PATH("/test", "child1"),
+ CGROUP_PATH("/test", "child2"),
+ CGROUP_PATH("/test/child1", "child1_1"),
+ CGROUP_PATH("/test/child1", "child1_2"),
+ CGROUP_PATH("/test/child2", "child2_1"),
+ CGROUP_PATH("/test/child2", "child2_2"),
+};
+
+#define N_CGROUPS ARRAY_SIZE(cgroups)
+#define N_NON_LEAF_CGROUPS 3
+
+static int root_cgroup_fd;
+static bool mounted_bpffs;
+
+/* reads file at 'path' to 'buf', returns 0 on success. */
+static int read_from_file(const char *path, char *buf, size_t size)
+{
+ int fd, len;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ len = read(fd, buf, size);
+ close(fd);
+ if (len < 0)
+ return len;
+
+ buf[len] = 0;
+ return 0;
+}
+
+/* mounts bpffs and mkdir for reading stats, returns 0 on success. */
+static int setup_bpffs(void)
+{
+ int err;
+
+ /* Mount bpffs */
+ err = mount("bpf", BPFFS_ROOT, "bpf", 0, NULL);
+ mounted_bpffs = !err;
+ if (!ASSERT_FALSE(err && errno != EBUSY, "mount"))
+ return err;
+
+ /* Create a directory to contain stat files in bpffs */
+ err = mkdir(BPFFS_VMSCAN, 0755);
+ if (!ASSERT_OK(err, "mkdir"))
+ return err;
+
+ return 0;
+}
+
+static void cleanup_bpffs(void)
+{
+ /* Remove created directory in bpffs */
+ ASSERT_OK(rmdir(BPFFS_VMSCAN), "rmdir "BPFFS_VMSCAN);
+
+ /* Unmount bpffs, if it wasn't already mounted when we started */
+ if (mounted_bpffs)
+ return;
+
+ ASSERT_OK(umount(BPFFS_ROOT), "unmount bpffs");
+}
+
+/* sets up cgroups, returns 0 on success. */
+static int setup_cgroups(void)
+{
+ int i, fd, err;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ return err;
+
+ root_cgroup_fd = get_root_cgroup();
+ if (!ASSERT_GE(root_cgroup_fd, 0, "get_root_cgroup"))
+ return root_cgroup_fd;
+
+ for (i = 0; i < N_CGROUPS; i++) {
+ fd = create_and_get_cgroup(cgroups[i].path);
+ if (!ASSERT_GE(fd, 0, "create_and_get_cgroup"))
+ return fd;
+
+ cgroups[i].fd = fd;
+ cgroups[i].id = get_cgroup_id(cgroups[i].path);
+
+ /*
+ * Enable memcg controller for the entire hierarchy.
+ * Note that stats are collected for all cgroups in a hierarchy
+ * with memcg enabled anyway, but are only exposed for cgroups
+ * that have memcg enabled.
+ */
+ if (i < N_NON_LEAF_CGROUPS) {
+ err = enable_controllers(cgroups[i].path, "memory");
+ if (!ASSERT_OK(err, "enable_controllers"))
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void cleanup_cgroups(void)
+{
+ close(root_cgroup_fd);
+ for (int i = 0; i < N_CGROUPS; i++)
+ close(cgroups[i].fd);
+ cleanup_cgroup_environment();
+}
+
+/* Sets up cgroup hierarchy, returns 0 on success.
*/ +static int setup_hierarchy(void) +{ + return setup_bpffs() || setup_cgroups(); +} + +static void destroy_hierarchy(void) +{ + cleanup_cgroups(); + cleanup_bpffs(); +} + +static int reclaimer(const char *cgroup_path, size_t size) +{ + static char size_buf[128]; + char *buf, *ptr; + int err; + + /* Join cgroup in the parent process workdir */ + if (join_parent_cgroup(cgroup_path)) + return EACCES; + + /* Allocate memory */ + buf = malloc(size); + if (!buf) + return ENOMEM; + + /* Write to memory to make sure it's actually allocated */ + for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE) + *ptr = 1; + + /* Try to reclaim memory */ + snprintf(size_buf, 128, "%lu", size); + err = write_cgroup_file_parent(cgroup_path, "memory.reclaim", size_buf); + + free(buf); + /* memory.reclaim returns EAGAIN if the amount is not fully reclaimed */ + if (err && errno != EAGAIN) + return errno; + + return 0; +} + +static int induce_vmscan(void) +{ + int i, status; + + /* + * In every leaf cgroup, run a child process that allocates some memory + * and attempts to reclaim some of it. + */ + for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) { + pid_t pid; + + /* Create reclaimer child */ + pid = fork(); + if (pid == 0) { + status = reclaimer(cgroups[i].path, MB(5)); + exit(status); + } + + /* Cleanup reclaimer child */ + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFEXITED(status), "reclaimer exited"); + ASSERT_EQ(WEXITSTATUS(status), 0, "reclaim exit code"); + } + return 0; +} + +static unsigned long long +get_cgroup_vmscan_delay(unsigned long long cgroup_id, const char *file_name) +{ + unsigned long long vmscan = 0, id = 0; + static char buf[128], path[128]; + + /* For every cgroup, read the file generated by cgroup_iter */ + snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name); + if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter")) + return 0; + + /* Check the output file formatting */ + ASSERT_EQ(sscanf(buf, "cg_id: %llu, total_vmscan_delay: %llu\n", + &id, &vmscan), 2, "output format"); + + /* Check that the cgroup_id is displayed correctly */ + ASSERT_EQ(id, cgroup_id, "cgroup_id"); + /* Check that the vmscan reading is non-zero */ + ASSERT_GT(vmscan, 0, "vmscan_reading"); + return vmscan; +} + +static void check_vmscan_stats(void) +{ + unsigned long long vmscan_readings[N_CGROUPS], vmscan_root; + int i; + + for (i = 0; i < N_CGROUPS; i++) { + vmscan_readings[i] = get_cgroup_vmscan_delay(cgroups[i].id, + cgroups[i].name); + } + + /* Read stats for root too */ + vmscan_root = get_cgroup_vmscan_delay(CG_ROOT_ID, CG_ROOT_NAME); + + /* Check that child1 == child1_1 + child1_2 */ + ASSERT_EQ(vmscan_readings[1], vmscan_readings[3] + vmscan_readings[4], + "child1_vmscan"); + /* Check that child2 == child2_1 + child2_2 */ + ASSERT_EQ(vmscan_readings[2], vmscan_readings[5] + vmscan_readings[6], + "child2_vmscan"); + /* Check that test == child1 + child2 */ + ASSERT_EQ(vmscan_readings[0], vmscan_readings[1] + vmscan_readings[2], + "test_vmscan"); + /* Check that root >= test */ + ASSERT_GE(vmscan_root, vmscan_readings[1], "root_vmscan"); +} + +/* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure. + */ +static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj, + int cgroup_fd, const char *file_name) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo = {}; + struct bpf_link *link; + static char path[128]; + int err; + + /* + * Create an iter link, parameterized by cgroup_fd. 
We only want to + * traverse one cgroup, so set the traversal order to "self". + */ + linfo.cgroup.cgroup_fd = cgroup_fd; + linfo.cgroup.order = BPF_ITER_SELF_ONLY; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(obj->progs.dump_vmscan, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return -EFAULT; + + /* Pin the link to a bpffs file */ + snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name); + err = bpf_link__pin(link, path); + ASSERT_OK(err, "pin cgroup_iter"); + + /* Remove the link, leaving only the ref held by the pinned file */ + bpf_link__destroy(link); + return err; +} + +/* Sets up programs for collecting stats, returns 0 on success. */ +static int setup_progs(struct cgroup_hierarchical_stats **skel) +{ + int i, err; + + *skel = cgroup_hierarchical_stats__open_and_load(); + if (!ASSERT_OK_PTR(*skel, "open_and_load")) + return 1; + + /* Attach cgroup_iter program that will dump the stats to cgroups */ + for (i = 0; i < N_CGROUPS; i++) { + err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name); + if (!ASSERT_OK(err, "setup_cgroup_iter")) + return err; + } + + /* Also dump stats for root */ + err = setup_cgroup_iter(*skel, root_cgroup_fd, CG_ROOT_NAME); + if (!ASSERT_OK(err, "setup_cgroup_iter")) + return err; + + bpf_program__set_autoattach((*skel)->progs.dump_vmscan, false); + err = cgroup_hierarchical_stats__attach(*skel); + if (!ASSERT_OK(err, "attach")) + return err; + + return 0; +} + +static void destroy_progs(struct cgroup_hierarchical_stats *skel) +{ + static char path[128]; + int i; + + for (i = 0; i < N_CGROUPS; i++) { + /* Delete files in bpffs that cgroup_iters are pinned in */ + snprintf(path, 128, "%s%s", BPFFS_VMSCAN, + cgroups[i].name); + ASSERT_OK(remove(path), "remove cgroup_iter pin"); + } + + /* Delete root file in bpffs */ + snprintf(path, 128, "%s%s", BPFFS_VMSCAN, CG_ROOT_NAME); + ASSERT_OK(remove(path), "remove cgroup_iter root pin"); + cgroup_hierarchical_stats__destroy(skel); +} + +void test_cgroup_hierarchical_stats(void) +{ + struct cgroup_hierarchical_stats *skel = NULL; + + if (setup_hierarchy()) + goto hierarchy_cleanup; + if (setup_progs(&skel)) + goto cleanup; + if (induce_vmscan()) + goto cleanup; + check_vmscan_stats(); +cleanup: + destroy_progs(skel); +hierarchy_cleanup: + destroy_hierarchy(); +} diff --git a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c new file mode 100644 index 000000000000..8ab4253a1592 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Functions to manage eBPF programs attached to cgroup subsystems + * + * Copyright 2022 Google LLC. + */ +#include "vmlinux.h" +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +/* + * Start times are stored per-task, not per-cgroup, as multiple tasks in one + * cgroup can perform reclaim concurrently. 
+ */ +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, __u64); +} vmscan_start_time SEC(".maps"); + +struct vmscan_percpu { + /* Previous percpu state, to figure out if we have new updates */ + __u64 prev; + /* Current percpu state */ + __u64 state; +}; + +struct vmscan { + /* State propagated through children, pending aggregation */ + __u64 pending; + /* Total state, including all cpus and all children */ + __u64 state; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 100); + __type(key, __u64); + __type(value, struct vmscan_percpu); +} pcpu_cgroup_vmscan_elapsed SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 100); + __type(key, __u64); + __type(value, struct vmscan); +} cgroup_vmscan_elapsed SEC(".maps"); + +extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym; +extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym; + +static struct cgroup *task_memcg(struct task_struct *task) +{ + int cgrp_id; + +#if __has_builtin(__builtin_preserve_enum_value) + cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id, memory_cgrp_id); +#else + cgrp_id = memory_cgrp_id; +#endif + return task->cgroups->subsys[cgrp_id]->cgroup; +} + +static uint64_t cgroup_id(struct cgroup *cgrp) +{ + return cgrp->kn->id; +} + +static int create_vmscan_percpu_elem(__u64 cg_id, __u64 state) +{ + struct vmscan_percpu pcpu_init = {.state = state, .prev = 0}; + + return bpf_map_update_elem(&pcpu_cgroup_vmscan_elapsed, &cg_id, + &pcpu_init, BPF_NOEXIST); +} + +static int create_vmscan_elem(__u64 cg_id, __u64 state, __u64 pending) +{ + struct vmscan init = {.state = state, .pending = pending}; + + return bpf_map_update_elem(&cgroup_vmscan_elapsed, &cg_id, + &init, BPF_NOEXIST); +} + +SEC("tp_btf/mm_vmscan_memcg_reclaim_begin") +int BPF_PROG(vmscan_start, int order, gfp_t gfp_flags) +{ + struct task_struct *task = bpf_get_current_task_btf(); + __u64 *start_time_ptr; + + start_time_ptr = bpf_task_storage_get(&vmscan_start_time, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (start_time_ptr) + *start_time_ptr = bpf_ktime_get_ns(); + return 0; +} + +SEC("tp_btf/mm_vmscan_memcg_reclaim_end") +int BPF_PROG(vmscan_end, unsigned long nr_reclaimed) +{ + struct vmscan_percpu *pcpu_stat; + struct task_struct *current = bpf_get_current_task_btf(); + struct cgroup *cgrp; + __u64 *start_time_ptr; + __u64 current_elapsed, cg_id; + __u64 end_time = bpf_ktime_get_ns(); + + /* + * cgrp is the first parent cgroup of current that has memcg enabled in + * its subtree_control, or NULL if memcg is disabled in the entire tree. + * In a cgroup hierarchy like this: + * a + * / \ + * b c + * If "a" has memcg enabled, while "b" doesn't, then processes in "b" + * will accumulate their stats directly to "a". This makes sure that no + * stats are lost from processes in leaf cgroups that don't have memcg + * enabled, but only exposes stats for cgroups that have memcg enabled. 
+ */ + cgrp = task_memcg(current); + if (!cgrp) + return 0; + + cg_id = cgroup_id(cgrp); + start_time_ptr = bpf_task_storage_get(&vmscan_start_time, current, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!start_time_ptr) + return 0; + + current_elapsed = end_time - *start_time_ptr; + pcpu_stat = bpf_map_lookup_elem(&pcpu_cgroup_vmscan_elapsed, + &cg_id); + if (pcpu_stat) + pcpu_stat->state += current_elapsed; + else if (create_vmscan_percpu_elem(cg_id, current_elapsed)) + return 0; + + cgroup_rstat_updated(cgrp, bpf_get_smp_processor_id()); + return 0; +} + +SEC("fentry/bpf_rstat_flush") +int BPF_PROG(vmscan_flush, struct cgroup *cgrp, struct cgroup *parent, int cpu) +{ + struct vmscan_percpu *pcpu_stat; + struct vmscan *total_stat, *parent_stat; + __u64 cg_id = cgroup_id(cgrp); + __u64 parent_cg_id = parent ? cgroup_id(parent) : 0; + __u64 *pcpu_vmscan; + __u64 state; + __u64 delta = 0; + + /* Add CPU changes on this level since the last flush */ + pcpu_stat = bpf_map_lookup_percpu_elem(&pcpu_cgroup_vmscan_elapsed, + &cg_id, cpu); + if (pcpu_stat) { + state = pcpu_stat->state; + delta += state - pcpu_stat->prev; + pcpu_stat->prev = state; + } + + total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); + if (!total_stat) { + if (create_vmscan_elem(cg_id, delta, 0)) + return 0; + + goto update_parent; + } + + /* Collect pending stats from subtree */ + if (total_stat->pending) { + delta += total_stat->pending; + total_stat->pending = 0; + } + + /* Propagate changes to this cgroup's total */ + total_stat->state += delta; + +update_parent: + /* Skip if there are no changes to propagate, or no parent */ + if (!delta || !parent_cg_id) + return 0; + + /* Propagate changes to cgroup's parent */ + parent_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, + &parent_cg_id); + if (parent_stat) + parent_stat->pending += delta; + else + create_vmscan_elem(parent_cg_id, 0, delta); + return 0; +} + +SEC("iter.s/cgroup") +int BPF_PROG(dump_vmscan, struct bpf_iter_meta *meta, struct cgroup *cgrp) +{ + struct seq_file *seq = meta->seq; + struct vmscan *total_stat; + __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0; + + /* Do nothing for the terminal call */ + if (!cg_id) + return 1; + + /* Flush the stats to make sure we get the most updated numbers */ + cgroup_rstat_flush(cgrp); + + total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); + if (!total_stat) { + BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: 0\n", + cg_id); + } else { + BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: %llu\n", + cg_id, total_stat->state); + } + + /* + * We only dump stats for one cgroup here, so return 1 to stop + * iteration after the first cgroup. + */ + return 1; +} -- cgit v1.2.3 From 7184aef9c0f7a81db8fd18d183ee42481d89bf35 Mon Sep 17 00:00:00 2001 From: Lam Thai Date: Wed, 24 Aug 2022 15:59:00 -0700 Subject: bpftool: Fix a wrong type cast in btf_dumper_int When `data` points to a boolean value, casting it to `int *` is problematic and could lead to a wrong value being passed to `jsonw_bool`. Change the cast to `bool *` instead. 
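For illustration only (not part of the patch), a minimal user-space sketch of the failure mode: a bool occupies a single byte, so dereferencing the data pointer as int * also reads three adjacent bytes that do not belong to the value, and a false bool can be reported as true:

  #include <stdbool.h>
  #include <stdio.h>

  int main(void)
  {
  	/* Emulate a dump buffer: a one-byte false bool followed by
  	 * unrelated non-zero bytes, as happens inside a struct. */
  	unsigned char buf[4] = { 0x00, 0xde, 0xad, 0xbe };

  	printf("as bool: %d\n", (int)*(bool *)buf); /* 0, correct */
  	printf("as int:  %d\n", *(int *)buf);       /* non-zero, bogus "true" */
  	return 0;
  }

(The int read is itself undefined behavior here; it is shown only because it mirrors the over-wide read that the old cast performed.)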
Fixes: b12d6ec09730 ("bpf: btf: add btf print functionality") Signed-off-by: Lam Thai Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220824225859.9038-1-lamthai@arista.com --- tools/bpf/bpftool/btf_dumper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 125798b0bc5d..19924b6ce796 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -452,7 +452,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, *(char *)data); break; case BTF_INT_BOOL: - jsonw_bool(jw, *(int *)data); + jsonw_bool(jw, *(bool *)data); break; default: /* shouldn't happen */ -- cgit v1.2.3 From d4ffb6f39f1a1b260966b43a4ffdb64779c650dd Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Thu, 25 Aug 2022 15:39:36 -0700 Subject: bpf: Add CGROUP prefix to cgroup_iter_order bpf_cgroup_iter_order is globally visible but the entries do not have CGROUP prefix. As requested by Andrii, put a CGROUP in the names in bpf_cgroup_iter_order. This patch fixes two previous commits: one introduced the API and the other uses the API in bpf selftest (that is, the selftest cgroup_hierarchical_stats). I tested this patch via the following command: test_progs -t cgroup,iter,btf_dump Fixes: d4ccaf58a847 ("bpf: Introduce cgroup iter") Fixes: 88886309d2e8 ("selftests/bpf: add a selftest for cgroup hierarchical stats collection") Suggested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Signed-off-by: Hao Luo Link: https://lore.kernel.org/r/20220825223936.1865810-1-haoluo@google.com Signed-off-by: Martin KaFai Lau --- tools/include/uapi/linux/bpf.h | 10 +++++----- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 2 +- .../selftests/bpf/prog_tests/cgroup_hierarchical_stats.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_iter.c | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 5056cef2112f..92f7387e378a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -88,11 +88,11 @@ struct bpf_cgroup_storage_key { }; enum bpf_cgroup_iter_order { - BPF_ITER_ORDER_UNSPEC = 0, - BPF_ITER_SELF_ONLY, /* process only a single object. */ - BPF_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */ - BPF_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */ - BPF_ITER_ANCESTORS_UP, /* walk ancestors upward. */ + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */ + BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */ + BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */ + BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. 
*/ }; union bpf_iter_link_info { diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index a1bae92be1fc..7b5bbe21b549 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -764,7 +764,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, /* union with nested struct */ TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT, - "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},}", + "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},}", { .cgroup = { .order = 1, .cgroup_fd = 1, }}); /* struct skb with nested structs/unions; because type output is so diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c index 101a6d70b863..bed1661596f7 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c @@ -275,7 +275,7 @@ static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj, * traverse one cgroup, so set the traversal order to "self". */ linfo.cgroup.cgroup_fd = cgroup_fd; - linfo.cgroup.order = BPF_ITER_SELF_ONLY; + linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY; opts.link_info = &linfo; opts.link_info_len = sizeof(linfo); link = bpf_program__attach_iter(obj->progs.dump_vmscan, &opts); diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c index 38958c37b9ce..c4a2adb38da1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c @@ -134,7 +134,7 @@ static void test_walk_preorder(struct cgroup_iter *skel) cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]); read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], - BPF_ITER_DESCENDANTS_PRE, "preorder"); + BPF_CGROUP_ITER_DESCENDANTS_PRE, "preorder"); } /* Postorder walk prints child and parent in order. */ @@ -145,7 +145,7 @@ static void test_walk_postorder(struct cgroup_iter *skel) cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]); read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], - BPF_ITER_DESCENDANTS_POST, "postorder"); + BPF_CGROUP_ITER_DESCENDANTS_POST, "postorder"); } /* Walking parents prints parent and then root. 
*/ @@ -159,7 +159,7 @@ static void test_walk_ancestors_up(struct cgroup_iter *skel) cg_id[PARENT], cg_id[ROOT]); read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], - BPF_ITER_ANCESTORS_UP, "ancestors_up"); + BPF_CGROUP_ITER_ANCESTORS_UP, "ancestors_up"); skel->bss->terminal_cgroup = 0; } @@ -174,7 +174,7 @@ static void test_early_termination(struct cgroup_iter *skel) PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], - BPF_ITER_DESCENDANTS_PRE, "early_termination"); + BPF_CGROUP_ITER_DESCENDANTS_PRE, "early_termination"); skel->bss->terminate_early = 0; } @@ -186,7 +186,7 @@ static void test_walk_self_only(struct cgroup_iter *skel) PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], - BPF_ITER_SELF_ONLY, "self_only"); + BPF_CGROUP_ITER_SELF_ONLY, "self_only"); } void test_cgroup_iter(void) -- cgit v1.2.3 From 343949e10798a52c6d6a14effc962e010ed471ae Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Wed, 24 Aug 2022 15:40:37 +0200 Subject: libbpf: add map_get_fd_by_id and map_delete_elem in light skeleton This allows to have a better control over maps from the kernel when preloading eBPF programs. Acked-by: Yonghong Song Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20220824134055.1328882-8-benjamin.tissoires@redhat.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/skel_internal.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'tools') diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 00c5f94b43be..1e82ab06c3eb 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -251,6 +251,29 @@ static inline int skel_map_update_elem(int fd, const void *key, return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); } +static inline int skel_map_delete_elem(int fd, const void *key) +{ + const size_t attr_sz = offsetofend(union bpf_attr, flags); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.map_fd = fd; + attr.key = (long)key; + + return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); +} + +static inline int skel_map_get_fd_by_id(__u32 id) +{ + const size_t attr_sz = offsetofend(union bpf_attr, flags); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.map_id = id; + + return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); +} + static inline int skel_raw_tracepoint_open(const char *name, int prog_fd) { const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd); -- cgit v1.2.3 From ab9ac19c4d0615fee40ec7d49fa16c9fd33f61f8 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Thu, 25 Aug 2022 23:06:59 -0600 Subject: selftests/bpf: fix type conflict in test_tc_dtime The sys/socket.h header isn't required to build test_tc_dtime and may cause a type conflict. 
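As a hedged sketch (hypothetical program, not from this patch) of the include style that avoids such clashes, BPF program sources can stay on kernel UAPI and libbpf headers so that only one set of fixed-width typedefs is ever in scope:

  #include <linux/bpf.h>       /* pulls in linux/types.h: __u8 ... __u64 */
  #include <bpf/bpf_helpers.h>

  SEC("tc")
  int dtime_sketch(struct __sk_buff *skb)
  {
  	__u64 now = bpf_ktime_get_ns(); /* prefer __uXX/__sXX over intXX_t */

  	skb->tstamp = now;
  	return 0;
  }

  char _license[] SEC("license") = "GPL";

With that, the glibc sys/socket.h include chain never enters the picture.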
Fixes the following error: In file included from /usr/include/x86_64-linux-gnu/sys/types.h:155, from /usr/include/x86_64-linux-gnu/bits/socket.h:29, from /usr/include/x86_64-linux-gnu/sys/socket.h:33, from progs/test_tc_dtime.c:18: /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:24:18: error: conflicting types for 'int8_t'; have '__int8_t' {aka 'signed char'} 24 | typedef __int8_t int8_t; | ^~~~~~ In file included from progs/test_tc_dtime.c:5: /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:34:23: note: previous declaration of 'int8_t' with type 'int8_t' {aka 'char'} 34 | typedef __INT8_TYPE__ int8_t; | ^~~~~~ /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:27:19: error: conflicting types for 'int64_t'; have '__int64_t' {aka 'long long int'} 27 | typedef __int64_t int64_t; | ^~~~~~~ /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:43:24: note: previous declaration of 'int64_t' with type 'int64_t' {aka 'long int'} 43 | typedef __INT64_TYPE__ int64_t; | ^~~~~~~ make: *** [Makefile:537: /home/buildroot/bpf-next/tools/testing/selftests/bpf/bpf_gcc/test_tc_dtime.o] Error 1 Signed-off-by: James Hilliard Link: https://lore.kernel.org/r/20220826050703.869571-1-james.hilliard1@gmail.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/progs/test_tc_dtime.c | 1 - 1 file changed, 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c index b596479a9ebe..125beec31834 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c +++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c @@ -15,7 +15,6 @@ #include #include #include -#include /* veth_src --- veth_src_fwd --- veth_det_fwd --- veth_dst * | | -- cgit v1.2.3 From b05d64efbb21ad231516b44317af34d2b586cfc4 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Thu, 25 Aug 2022 21:51:39 -0600 Subject: selftests/bpf: Declare subprog_noise as static in tailcall_bpf2bpf4 Due to bpf_map_lookup_elem being declared static we need to also declare subprog_noise as static. Fixes the following error: progs/tailcall_bpf2bpf4.c:26:9: error: 'bpf_map_lookup_elem' is static but used in inline function 'subprog_noise' which is not static [-Werror] 26 | bpf_map_lookup_elem(&nop_table, &key); | ^~~~~~~~~~~~~~~~~~~ Signed-off-by: James Hilliard Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20220826035141.737919-1-james.hilliard1@gmail.com --- tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c index b67e8022d500..a017d6b2f1dd 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c @@ -19,7 +19,7 @@ struct { int count = 0; int noise = 0; -__always_inline int subprog_noise(void) +static __always_inline int subprog_noise(void) { __u32 key = 0; -- cgit v1.2.3 From aa75622c3be4d5819ce69c714acbcbd67bba5d65 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 25 Aug 2022 23:08:06 +0100 Subject: bpf: Fix a few typos in BPF helpers documentation Address a few typos in the documentation for the BPF helper functions. They were reported by Jakub [0], who ran spell checkers on the generated man page [1]. 
[0] https://lore.kernel.org/linux-man/d22dcd47-023c-8f52-d369-7b5308e6c842@gmail.com/T/#mb02e7d4b7fb61d98fa914c77b581184e9a9537af [1] https://lore.kernel.org/linux-man/eb6a1e41-c48e-ac45-5154-ac57a2c76108@gmail.com/T/#m4a8d1b003616928013ffcd1450437309ab652f9f v3: Do not copy unrelated (and breaking) elements to tools/ header v2: Turn a ',' into a ';' Reported-by: Jakub Wilk Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220825220806.107143-1-quentin@isovalent.com --- tools/include/uapi/linux/bpf.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 92f7387e378a..f4ba82a1eace 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4456,7 +4456,7 @@ union bpf_attr { * * **-EEXIST** if the option already exists. * - * **-EFAULT** on failrue to parse the existing header options. + * **-EFAULT** on failure to parse the existing header options. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. @@ -4665,7 +4665,7 @@ union bpf_attr { * a *map* with *task* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this - * helper enforces the key must be an task_struct and the map must also + * helper enforces the key must be a task_struct and the map must also * be a **BPF_MAP_TYPE_TASK_STORAGE**. * * Underneath, the value is stored locally at *task* instead of @@ -4723,7 +4723,7 @@ union bpf_attr { * * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size) * Description - * Returns the stored IMA hash of the *inode* (if it's avaialable). + * Returns the stored IMA hash of the *inode* (if it's available). * If the hash is larger than *size*, then only *size* * bytes will be copied to *dst* * Return @@ -4747,12 +4747,12 @@ union bpf_attr { * * The argument *len_diff* can be used for querying with a planned * size change. This allows to check MTU prior to changing packet - * ctx. Providing an *len_diff* adjustment that is larger than the + * ctx. Providing a *len_diff* adjustment that is larger than the * actual packet size (resulting in negative packet size) will in - * principle not exceed the MTU, why it is not considered a - * failure. Other BPF-helpers are needed for performing the - * planned size change, why the responsability for catch a negative - * packet size belong in those helpers. + * principle not exceed the MTU, which is why it is not considered + * a failure. Other BPF helpers are needed for performing the + * planned size change; therefore the responsibility for catching + * a negative packet size belongs in those helpers. * * Specifying *ifindex* zero means the MTU check is performed * against the current net device. This is practical if this isn't -- cgit v1.2.3 From 3721359d3907c313833a2fd6e40c36a30179ea89 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Thu, 25 Aug 2022 23:29:22 -0600 Subject: selftests/bpf: Fix bind{4,6} tcp/socket header type conflict There is a potential for us to hit a type conflict when including netinet/tcp.h with sys/socket.h, we can remove these as they are not actually needed. 
Fixes errors like the below when compiling with gcc BPF backend: In file included from /usr/include/netinet/tcp.h:91, from progs/bind4_prog.c:10: /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:34:23: error: conflicting types for 'int8_t'; have 'char' 34 | typedef __INT8_TYPE__ int8_t; | ^~~~~~ In file included from /usr/include/x86_64-linux-gnu/sys/types.h:155, from /usr/include/x86_64-linux-gnu/bits/socket.h:29, from /usr/include/x86_64-linux-gnu/sys/socket.h:33, from progs/bind4_prog.c:9: /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:24:18: note: previous declaration of 'int8_t' with type 'int8_t' {aka 'signed char'} 24 | typedef __int8_t int8_t; | ^~~~~~ /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:43:24: error: conflicting types for 'int64_t'; have 'long int' 43 | typedef __INT64_TYPE__ int64_t; | ^~~~~~~ /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:27:19: note: previous declaration of 'int64_t' with type 'int64_t' {aka 'long long int'} 27 | typedef __int64_t int64_t; | ^~~~~~~ make: *** [Makefile:537: /home/buildroot/bpf-next/tools/testing/selftests/bpf/bpf_gcc/bind4_prog.o] Error 1 Signed-off-by: James Hilliard Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220826052925.980431-1-james.hilliard1@gmail.com --- tools/testing/selftests/bpf/progs/bind4_prog.c | 2 -- tools/testing/selftests/bpf/progs/bind6_prog.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/bind4_prog.c b/tools/testing/selftests/bpf/progs/bind4_prog.c index 474c6a62078a..a487f60b73ac 100644 --- a/tools/testing/selftests/bpf/progs/bind4_prog.c +++ b/tools/testing/selftests/bpf/progs/bind4_prog.c @@ -6,8 +6,6 @@ #include #include #include -#include -#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bind6_prog.c b/tools/testing/selftests/bpf/progs/bind6_prog.c index c19cfa869f30..d62cd9e9cf0e 100644 --- a/tools/testing/selftests/bpf/progs/bind6_prog.c +++ b/tools/testing/selftests/bpf/progs/bind6_prog.c @@ -6,8 +6,6 @@ #include #include #include -#include -#include #include #include -- cgit v1.2.3 From 2eb680401df62c035ff50a7faf1296565b030df7 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Mon, 29 Aug 2022 09:47:09 -0600 Subject: selftests/bpf: Fix connect4_prog tcp/socket header type conflict There is a potential for us to hit a type conflict when including netinet/tcp.h and sys/socket.h, we can replace both of these includes with linux/tcp.h and bpf_tcp_helpers.h to avoid this conflict. 
Fixes errors like the below when compiling with gcc BPF backend: In file included from /usr/include/netinet/tcp.h:91, from progs/connect4_prog.c:11: /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:34:23: error: conflicting types for 'int8_t'; have 'char' 34 | typedef __INT8_TYPE__ int8_t; | ^~~~~~ In file included from /usr/include/x86_64-linux-gnu/sys/types.h:155, from /usr/include/x86_64-linux-gnu/bits/socket.h:29, from /usr/include/x86_64-linux-gnu/sys/socket.h:33, from progs/connect4_prog.c:10: /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:24:18: note: previous declaration of 'int8_t' with type 'int8_t' {aka 'signed char'} 24 | typedef __int8_t int8_t; | ^~~~~~ /home/buildroot/opt/cross/lib/gcc/bpf/13.0.0/include/stdint.h:43:24: error: conflicting types for 'int64_t'; have 'long int' 43 | typedef __INT64_TYPE__ int64_t; | ^~~~~~~ /usr/include/x86_64-linux-gnu/bits/stdint-intn.h:27:19: note: previous declaration of 'int64_t' with type 'int64_t' {aka 'long long int'} 27 | typedef __int64_t int64_t; | ^~~~~~~ Signed-off-by: James Hilliard Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220829154710.3870139-1-james.hilliard1@gmail.com --- tools/testing/selftests/bpf/progs/connect4_prog.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index b241932911db..ec25371de789 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -7,14 +7,15 @@ #include #include #include -#include -#include +#include #include #include #include #include +#include "bpf_tcp_helpers.h" + #define SRC_REWRITE_IP4 0x7f000004U #define DST_REWRITE_IP4 0x7f000001U #define DST_REWRITE_PORT4 4444 -- cgit v1.2.3 From 6f95de6d713130c953af0a40b13c1da519f91c4e Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Mon, 29 Aug 2022 16:18:28 -0700 Subject: bpftool: Add support for querying cgroup_iter link Support dumping info of a cgroup_iter link. This includes showing the cgroup's id and the order for walking the cgroup hierarchy. 
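The fields bpftool prints can also be queried programmatically; a sketch (assuming an fd for an already-created cgroup_iter link; show_cgroup_iter is an illustrative name) of the underlying call:

  #include <stdio.h>
  #include <string.h>
  #include <bpf/bpf.h>

  static void show_cgroup_iter(int link_fd)
  {
  	struct bpf_link_info info = {};
  	__u32 len = sizeof(info);
  	char target[32] = {};

  	info.iter.target_name = (unsigned long)target;
  	info.iter.target_name_len = sizeof(target);

  	if (bpf_obj_get_info_by_fd(link_fd, &info, &len))
  		return;
  	if (!strcmp(target, "cgroup"))
  		printf("cgroup_id %llu order %u\n",
  		       (unsigned long long)info.iter.cgroup.cgroup_id,
  		       info.iter.cgroup.order);
  }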
Example output is as follows: > bpftool link show 1: iter prog 2 target_name bpf_map 2: iter prog 3 target_name bpf_prog 3: iter prog 12 target_name cgroup cgroup_id 72 order self_only > bpftool -p link show [{ "id": 1, "type": "iter", "prog_id": 2, "target_name": "bpf_map" },{ "id": 2, "type": "iter", "prog_id": 3, "target_name": "bpf_prog" },{ "id": 3, "type": "iter", "prog_id": 12, "target_name": "cgroup", "cgroup_id": 72, "order": "self_only" } ] Signed-off-by: Hao Luo Reviewed-by: Quentin Monnet Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20220829231828.1016835-1-haoluo@google.com Signed-off-by: Martin KaFai Lau --- tools/bpf/bpftool/link.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'tools') diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 7a20931c3250..ef0dc2f8d5a2 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -83,6 +83,29 @@ static bool is_iter_map_target(const char *target_name) strcmp(target_name, "bpf_sk_storage_map") == 0; } +static bool is_iter_cgroup_target(const char *target_name) +{ + return strcmp(target_name, "cgroup") == 0; +} + +static const char *cgroup_order_string(__u32 order) +{ + switch (order) { + case BPF_CGROUP_ITER_ORDER_UNSPEC: + return "order_unspec"; + case BPF_CGROUP_ITER_SELF_ONLY: + return "self_only"; + case BPF_CGROUP_ITER_DESCENDANTS_PRE: + return "descendants_pre"; + case BPF_CGROUP_ITER_DESCENDANTS_POST: + return "descendants_post"; + case BPF_CGROUP_ITER_ANCESTORS_UP: + return "ancestors_up"; + default: /* won't happen */ + return "unknown"; + } +} + static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr) { const char *target_name = u64_to_ptr(info->iter.target_name); @@ -91,6 +114,12 @@ static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr) if (is_iter_map_target(target_name)) jsonw_uint_field(wtr, "map_id", info->iter.map.map_id); + + if (is_iter_cgroup_target(target_name)) { + jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id); + jsonw_string_field(wtr, "order", + cgroup_order_string(info->iter.cgroup.order)); + } } static int get_prog_info(int prog_id, struct bpf_prog_info *info) @@ -208,6 +237,12 @@ static void show_iter_plain(struct bpf_link_info *info) if (is_iter_map_target(target_name)) printf("map_id %u ", info->iter.map.map_id); + + if (is_iter_cgroup_target(target_name)) { + printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id); + printf("order %s ", + cgroup_order_string(info->iter.cgroup.order)); + } } static int show_link_close_plain(int fd, struct bpf_link_info *info) -- cgit v1.2.3 From 14e5ce79943a72b9bf0fff8a5867320a9fa3e40d Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Mon, 29 Aug 2022 15:05:46 -0600 Subject: libbpf: Add GCC support for bpf_tail_call_static The bpf_tail_call_static function is currently not defined unless using clang >= 8. To support bpf_tail_call_static on GCC we can check if __clang__ is not defined to enable bpf_tail_call_static. We need to use GCC assembly syntax when the compiler does not define __clang__ as LLVM inline assembly is not fully compatible with GCC. 
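A usage sketch (hypothetical program; jmp_table and entry_prog are illustrative names): the slot argument must be a compile-time constant, which is what lets the JIT patch the jump regardless of which compiler emitted the call:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
  	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
  	__uint(max_entries, 2);
  	__type(key, __u32);
  	__type(value, __u32);
  } jmp_table SEC(".maps");

  SEC("tc")
  int entry_prog(struct __sk_buff *skb)
  {
  	/* constant slot index, required by bpf_tail_call_static() */
  	bpf_tail_call_static(skb, &jmp_table, 0);
  	return 0; /* reached only if the tail call fails */
  }

  char _license[] SEC("license") = "GPL";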
Signed-off-by: James Hilliard Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220829210546.755377-1-james.hilliard1@gmail.com --- tools/lib/bpf/bpf_helpers.h | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 7349b16b8e2f..867b734839dd 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -131,7 +131,7 @@ /* * Helper function to perform a tail call with a constant/immediate map slot. */ -#if __clang_major__ >= 8 && defined(__bpf__) +#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__) static __always_inline void bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) { @@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) __bpf_unreachable(); /* - * Provide a hard guarantee that LLVM won't optimize setting r2 (map - * pointer) and r3 (constant map index) from _different paths_ ending + * Provide a hard guarantee that the compiler won't optimize setting r2 + * (map pointer) and r3 (constant map index) from _different paths_ ending * up at the _same_ call insn as otherwise we won't be able to use the * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key @@ -148,12 +148,19 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) * * Note on clobber list: we need to stay in-line with BPF calling * convention, so even if we don't end up using r0, r4, r5, we need - * to mark them as clobber so that LLVM doesn't end up using them - * before / after the call. + * to mark them as clobber so that the compiler doesn't end up using + * them before / after the call. */ - asm volatile("r1 = %[ctx]\n\t" + asm volatile( +#ifdef __clang__ + "r1 = %[ctx]\n\t" "r2 = %[map]\n\t" "r3 = %[slot]\n\t" +#else + "mov %%r1,%[ctx]\n\t" + "mov %%r2,%[map]\n\t" + "mov %%r3,%[slot]\n\t" +#endif "call 12" :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) : "r0", "r1", "r2", "r3", "r4", "r5"); -- cgit v1.2.3 From 197072945a708d62181895409effdfcda80c7798 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 30 Aug 2022 16:19:53 -0700 Subject: selftest/bpf: Ensure no module loading in bpf_setsockopt(TCP_CONGESTION) This patch adds a test to ensure bpf_setsockopt(TCP_CONGESTION, "not_exist") will not trigger the kernel module autoload. Before the fix: [ 40.535829] BUG: sleeping function called from invalid context at include/linux/sched/mm.h:274 [...] [ 40.552134] tcp_ca_find_autoload.constprop.0+0xcb/0x200 [ 40.552689] tcp_set_congestion_control+0x99/0x7b0 [ 40.553203] do_tcp_setsockopt+0x3ed/0x2240 [...] 
[ 40.556041] __bpf_setsockopt+0x124/0x640 Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220830231953.792412-1-martin.lau@linux.dev --- tools/testing/selftests/bpf/progs/setget_sockopt.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 40606ef47a38..79debf3c2f44 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -32,6 +32,7 @@ struct sockopt_test { unsigned int flip:1; }; +static const char not_exist_cc[] = "not_exist"; static const char cubic_cc[] = "cubic"; static const char reno_cc[] = "reno"; @@ -307,6 +308,9 @@ static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc) const char *new_cc; int new_cc_len; + if (!bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, + (void *)not_exist_cc, sizeof(not_exist_cc))) + return 1; if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc))) return 1; if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) { -- cgit v1.2.3 From 1c636b6277a2b2bf504df490b8dbadd2bd34ccd4 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 31 Aug 2022 12:26:29 +0800 Subject: selftests/bpf: Add test cases for htab update One test demonstrates that a reentrant update of the same hash map bucket should fail, and another one shows that concurrent updates of the same hash map bucket should succeed and not be rejected by the reentrancy check on the bucket lock. There is no trampoline support on s390x, so move htab_update to the denylist. Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20220831042629.130006-4-houtao@huaweicloud.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + .../testing/selftests/bpf/prog_tests/htab_update.c | 126 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/htab_update.c | 29 +++++ 3 files changed, 156 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/htab_update.c create mode 100644 tools/testing/selftests/bpf/progs/htab_update.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 736b65f61022..ba02b559ca68 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -68,3 +68,4 @@ unpriv_bpf_disabled # fentry setget_sockopt # attach unexpected error: -524 (trampoline) cb_refs # expected error message unexpected error: -524 (trampoline) cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc) +htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline) diff --git a/tools/testing/selftests/bpf/prog_tests/htab_update.c b/tools/testing/selftests/bpf/prog_tests/htab_update.c new file mode 100644 index 000000000000..2bc85f4814f4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/htab_update.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022.
Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include +#include +#include +#include "htab_update.skel.h" + +struct htab_update_ctx { + int fd; + int loop; + bool stop; +}; + +static void test_reenter_update(void) +{ + struct htab_update *skel; + unsigned int key, value; + int err; + + skel = htab_update__open(); + if (!ASSERT_OK_PTR(skel, "htab_update__open")) + return; + + /* lookup_elem_raw() may be inlined and find_kernel_btf_id() will return -ESRCH */ + bpf_program__set_autoload(skel->progs.lookup_elem_raw, true); + err = htab_update__load(skel); + if (!ASSERT_TRUE(!err || err == -ESRCH, "htab_update__load") || err) + goto out; + + skel->bss->pid = getpid(); + err = htab_update__attach(skel); + if (!ASSERT_OK(err, "htab_update__attach")) + goto out; + + /* Will trigger the reentrancy of bpf_map_update_elem() */ + key = 0; + value = 0; + err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, &value, 0); + if (!ASSERT_OK(err, "add element")) + goto out; + + ASSERT_EQ(skel->bss->update_err, -EBUSY, "no reentrancy"); +out: + htab_update__destroy(skel); +} + +static void *htab_update_thread(void *arg) +{ + struct htab_update_ctx *ctx = arg; + cpu_set_t cpus; + int i; + + /* Pinned on CPU 0 */ + CPU_ZERO(&cpus); + CPU_SET(0, &cpus); + pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus); + + i = 0; + while (i++ < ctx->loop && !ctx->stop) { + unsigned int key = 0, value = 0; + int err; + + err = bpf_map_update_elem(ctx->fd, &key, &value, 0); + if (err) { + ctx->stop = true; + return (void *)(long)err; + } + } + + return NULL; +} + +static void test_concurrent_update(void) +{ + struct htab_update_ctx ctx; + struct htab_update *skel; + unsigned int i, nr; + pthread_t *tids; + int err; + + skel = htab_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load")) + return; + + ctx.fd = bpf_map__fd(skel->maps.htab); + ctx.loop = 1000; + ctx.stop = false; + + nr = 4; + tids = calloc(nr, sizeof(*tids)); + if (!ASSERT_NEQ(tids, NULL, "no mem")) + goto out; + + for (i = 0; i < nr; i++) { + err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + unsigned int j; + + ctx.stop = true; + for (j = 0; j < i; j++) + pthread_join(tids[j], NULL); + goto out; + } + } + + for (i = 0; i < nr; i++) { + void *thread_err = NULL; + + pthread_join(tids[i], &thread_err); + ASSERT_EQ(thread_err, NULL, "update error"); + } + +out: + if (tids) + free(tids); + htab_update__destroy(skel); +} + +void test_htab_update(void) +{ + if (test__start_subtest("reenter_update")) + test_reenter_update(); + if (test__start_subtest("concurrent_update")) + test_concurrent_update(); +} diff --git a/tools/testing/selftests/bpf/progs/htab_update.c b/tools/testing/selftests/bpf/progs/htab_update.c new file mode 100644 index 000000000000..7481bb30b29b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/htab_update.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022. 
Huawei Technologies Co., Ltd */ +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} htab SEC(".maps"); + +int pid = 0; +int update_err = 0; + +SEC("?fentry/lookup_elem_raw") +int lookup_elem_raw(void *ctx) +{ + __u32 key = 0, value = 1; + + if ((bpf_get_current_pid_tgid() >> 32) != pid) + return 0; + + update_err = bpf_map_update_elem(&htab, &key, &value, 0); + return 0; +} -- cgit v1.2.3 From c710136e87747f1cc8e24948b3046ee57a1fe2eb Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Thu, 1 Sep 2022 14:19:37 +0800 Subject: selftests/bpf: Move sys_pidfd_open() into task_local_storage_helpers.h sys_pidfd_open() is defined twice in both test_bprm_opts.c and test_local_storage.c, so move it to a common header file. And it will be used in map_tests as well. Signed-off-by: Hou Tao Acked-by: Alexei Starovoitov Link: https://lore.kernel.org/r/20220901061938.3789460-4-houtao@huaweicloud.com Signed-off-by: Martin KaFai Lau --- .../testing/selftests/bpf/prog_tests/test_bprm_opts.c | 10 +--------- .../selftests/bpf/prog_tests/test_local_storage.c | 10 +--------- .../testing/selftests/bpf/task_local_storage_helpers.h | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+), 18 deletions(-) create mode 100644 tools/testing/selftests/bpf/task_local_storage_helpers.h (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c index 2559bb775762..a0054019e677 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c @@ -9,18 +9,10 @@ #include "bprm_opts.skel.h" #include "network_helpers.h" - -#ifndef __NR_pidfd_open -#define __NR_pidfd_open 434 -#endif +#include "task_local_storage_helpers.h" static const char * const bash_envp[] = { "TMPDIR=shouldnotbeset", NULL }; -static inline int sys_pidfd_open(pid_t pid, unsigned int flags) -{ - return syscall(__NR_pidfd_open, pid, flags); -} - static int update_storage(int map_fd, int secureexec) { int task_fd, ret = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c index 26ac26a88026..9c77cd6b1eaf 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c @@ -11,15 +11,7 @@ #include "local_storage.skel.h" #include "network_helpers.h" - -#ifndef __NR_pidfd_open -#define __NR_pidfd_open 434 -#endif - -static inline int sys_pidfd_open(pid_t pid, unsigned int flags) -{ - return syscall(__NR_pidfd_open, pid, flags); -} +#include "task_local_storage_helpers.h" static unsigned int duration; diff --git a/tools/testing/selftests/bpf/task_local_storage_helpers.h b/tools/testing/selftests/bpf/task_local_storage_helpers.h new file mode 100644 index 000000000000..711d5abb7d51 --- /dev/null +++ b/tools/testing/selftests/bpf/task_local_storage_helpers.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TASK_LOCAL_STORAGE_HELPER_H +#define __TASK_LOCAL_STORAGE_HELPER_H + +#include +#include +#include + +#ifndef __NR_pidfd_open +#define __NR_pidfd_open 434 +#endif + +static inline int sys_pidfd_open(pid_t pid, unsigned int flags) +{ + return syscall(__NR_pidfd_open, pid, flags); +} + +#endif -- cgit v1.2.3 From 73b97bc78b32eb739a7dd3394fa3981e8021c0ef Mon Sep 17 00:00:00 2001 
From: Hou Tao Date: Thu, 1 Sep 2022 14:19:38 +0800 Subject: selftests/bpf: Test concurrent updates on bpf_task_storage_busy Under a fully preemptible kernel, task local storage lookup operations on the same CPU may update the per-cpu bpf_task_storage_busy counter concurrently. If the update of bpf_task_storage_busy is not preemption safe, its final value may stay non-zero forever and bpf_task_storage_trylock() will always fail. So add a test case to ensure the update of bpf_task_storage_busy is preemption safe. The test case is skipped when CONFIG_PREEMPT is disabled, and it can only reproduce the problem probabilistically. By increasing TASK_STORAGE_MAP_NR_LOOP and running it in an ARM64 VM with 4 CPUs, it takes about four rounds to reproduce: > test_maps is modified to only run test_task_storage_map_stress_lookup() $ export TASK_STORAGE_MAP_NR_THREAD=256 $ export TASK_STORAGE_MAP_NR_LOOP=81920 $ export TASK_STORAGE_MAP_PIN_CPU=1 $ time ./test_maps test_task_storage_map_stress_lookup(135):FAIL:bad bpf_task_storage_busy got -2 real 0m24.743s user 0m6.772s sys 0m17.966s Signed-off-by: Hou Tao Acked-by: Alexei Starovoitov Link: https://lore.kernel.org/r/20220901061938.3789460-5-houtao@huaweicloud.com Signed-off-by: Martin KaFai Lau --- .../selftests/bpf/map_tests/task_storage_map.c | 122 +++++++++++++++++++++ .../bpf/progs/read_bpf_task_storage_busy.c | 39 +++++++ 2 files changed, 161 insertions(+) create mode 100644 tools/testing/selftests/bpf/map_tests/task_storage_map.c create mode 100644 tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c new file mode 100644 index 000000000000..1adc9c292eb2 --- /dev/null +++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022.
Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "test_maps.h" +#include "task_local_storage_helpers.h" +#include "read_bpf_task_storage_busy.skel.h" + +struct lookup_ctx { + bool start; + bool stop; + int pid_fd; + int map_fd; + int loop; +}; + +static void *lookup_fn(void *arg) +{ + struct lookup_ctx *ctx = arg; + long value; + int i = 0; + + while (!ctx->start) + usleep(1); + + while (!ctx->stop && i++ < ctx->loop) + bpf_map_lookup_elem(ctx->map_fd, &ctx->pid_fd, &value); + return NULL; +} + +static void abort_lookup(struct lookup_ctx *ctx, pthread_t *tids, unsigned int nr) +{ + unsigned int i; + + ctx->stop = true; + ctx->start = true; + for (i = 0; i < nr; i++) + pthread_join(tids[i], NULL); +} + +void test_task_storage_map_stress_lookup(void) +{ +#define MAX_NR_THREAD 4096 + unsigned int i, nr = 256, loop = 8192, cpu = 0; + struct read_bpf_task_storage_busy *skel; + pthread_t tids[MAX_NR_THREAD]; + struct lookup_ctx ctx; + cpu_set_t old, new; + const char *cfg; + int err; + + cfg = getenv("TASK_STORAGE_MAP_NR_THREAD"); + if (cfg) { + nr = atoi(cfg); + if (nr > MAX_NR_THREAD) + nr = MAX_NR_THREAD; + } + cfg = getenv("TASK_STORAGE_MAP_NR_LOOP"); + if (cfg) + loop = atoi(cfg); + cfg = getenv("TASK_STORAGE_MAP_PIN_CPU"); + if (cfg) + cpu = atoi(cfg); + + skel = read_bpf_task_storage_busy__open_and_load(); + err = libbpf_get_error(skel); + CHECK(err, "open_and_load", "error %d\n", err); + + /* Only for a fully preemptible kernel */ + if (!skel->kconfig->CONFIG_PREEMPT) + return; + + /* Save the old affinity setting */ + sched_getaffinity(getpid(), sizeof(old), &old); + + /* Pinned on a specific CPU */ + CPU_ZERO(&new); + CPU_SET(cpu, &new); + sched_setaffinity(getpid(), sizeof(new), &new); + + ctx.start = false; + ctx.stop = false; + ctx.pid_fd = sys_pidfd_open(getpid(), 0); + ctx.map_fd = bpf_map__fd(skel->maps.task); + ctx.loop = loop; + for (i = 0; i < nr; i++) { + err = pthread_create(&tids[i], NULL, lookup_fn, &ctx); + if (err) { + abort_lookup(&ctx, tids, i); + CHECK(err, "pthread_create", "error %d\n", err); + goto out; + } + } + + ctx.start = true; + for (i = 0; i < nr; i++) + pthread_join(tids[i], NULL); + + skel->bss->pid = getpid(); + err = read_bpf_task_storage_busy__attach(skel); + CHECK(err, "attach", "error %d\n", err); + + /* Trigger program */ + syscall(SYS_gettid); + skel->bss->pid = 0; + + CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy); +out: + read_bpf_task_storage_busy__destroy(skel); + /* Restore affinity setting */ + sched_setaffinity(getpid(), sizeof(old), &old); +} diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c new file mode 100644 index 000000000000..a47bb0120719 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022. 
Huawei Technologies Co., Ltd */ +#include "vmlinux.h" +#include +#include + +extern bool CONFIG_PREEMPT __kconfig __weak; +extern const int bpf_task_storage_busy __ksym; + +char _license[] SEC("license") = "GPL"; + +int pid = 0; +int busy = 0; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, long); +} task SEC(".maps"); + +SEC("raw_tp/sys_enter") +int BPF_PROG(read_bpf_task_storage_busy) +{ + int *value; + int key; + + if (!CONFIG_PREEMPT) + return 0; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + value = bpf_this_cpu_ptr(&bpf_task_storage_busy); + if (value) + busy = *value; + + return 0; +} -- cgit v1.2.3 From 44c51472bef83bb70b43e2f4b7a592096f32a855 Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Wed, 31 Aug 2022 17:40:09 +0300 Subject: bpf: Support getting tunnel flags Existing 'bpf_skb_get_tunnel_key' extracts various tunnel parameters (id, ttl, tos, local and remote) but does not expose ip_tunnel_info's tun_flags to the BPF program. It makes sense to expose tun_flags to the BPF program. Assume for example multiple GRE tunnels maintained on a single GRE interface in collect_md mode. The program expects origins to initiate over GRE, however different origins use different GRE characteristics (e.g. some prefer to use GRE checksum, some do not; some pass a GRE key, some do not, etc..). A BPF program getting tun_flags can therefore remember the relevant flags (e.g. TUNNEL_CSUM, TUNNEL_SEQ...) for each initiating remote. In the reply path, the program can use 'bpf_skb_set_tunnel_key' in order to correctly reply to the remote, using similar characteristics, based on the stored tunnel flags. Introduce BPF_F_TUNINFO_FLAGS flag for bpf_skb_get_tunnel_key. If specified, 'bpf_tunnel_key->tunnel_flags' is set with the tun_flags. Decided to use the existing unused 'tunnel_ext' as the storage for the 'tunnel_flags' in order to avoid changing bpf_tunnel_key's layout. Also, the following has been considered during the design: 1. Convert the "interesting" internal TUNNEL_xxx flags back to BPF_F_yyy and place into the new 'tunnel_flags' field. This has 2 drawbacks: - The BPF_F_yyy flags are from *set_tunnel_key* enumeration space, e.g. BPF_F_ZERO_CSUM_TX. It is awkward that it is "returned" into tunnel_flags from a *get_tunnel_key* call. - Not all "interesting" TUNNEL_xxx flags can be mapped to existing BPF_F_yyy flags, and it doesn't make sense to create new BPF_F_yyy flags just for purposes of the returned tunnel_flags. 2. Place key.tun_flags into 'tunnel_flags' but mask them, keeping only "interesting" flags. That's ok, but the drawback is that what's "interesting" for my usecase might be limiting for other usecases. Therefore I decided to expose what's in key.tun_flags *as is*, which seems most flexible. The BPF user can just choose to ignore bits he's not interested in. The TUNNEL_xxx are also UAPI, so no harm exposing them back in the get_tunnel_key call. 
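A minimal sketch of the resulting usage from a TC program (program shape and names are illustrative) could be:

#include <linux/bpf.h>
#include <linux/if_tunnel.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int get_tun_flags(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_TUNINFO_FLAGS))
		return TC_ACT_OK;

	/* key.tunnel_flags now mirrors the big-endian TUNNEL_* bits
	 * from ip_tunnel_info, e.g. TUNNEL_KEY, TUNNEL_CSUM, TUNNEL_SEQ.
	 */
	if (key.tunnel_flags & TUNNEL_CSUM)
		bpf_printk("peer uses tunnel checksum");
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";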
Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220831144010.174110-1-shmulik.ladkani@gmail.com --- tools/include/uapi/linux/bpf.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index f4ba82a1eace..793103b10eab 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5659,6 +5659,11 @@ enum { BPF_F_SEQ_NUMBER = (1ULL << 3), }; +/* BPF_FUNC_skb_get_tunnel_key flags. */ +enum { + BPF_F_TUNINFO_FLAGS = (1ULL << 4), +}; + /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ @@ -5848,7 +5853,10 @@ struct bpf_tunnel_key { }; __u8 tunnel_tos; __u8 tunnel_ttl; - __u16 tunnel_ext; /* Padding, future use. */ + union { + __u16 tunnel_ext; /* compat */ + __be16 tunnel_flags; + }; __u32 tunnel_label; union { __u32 local_ipv4; -- cgit v1.2.3 From 8cc61b7a6416541261d56bcdd93a711407f711ba Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Wed, 31 Aug 2022 17:40:10 +0300 Subject: selftests/bpf: Amend test_tunnel to exercise BPF_F_TUNINFO_FLAGS Get the tunnel flags in {ipv6}vxlan_get_tunnel_src and ensure they are aligned with tunnel params set at {ipv6}vxlan_set_tunnel_dst. Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220831144010.174110-2-shmulik.ladkani@gmail.com --- .../testing/selftests/bpf/progs/test_tunnel_kern.c | 24 ++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index df0673c4ecbe..98af55f0bcd3 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -386,7 +387,8 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb) __u32 orig_daddr; __u32 index = 0; - ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), + BPF_F_TUNINFO_FLAGS); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; @@ -398,10 +400,13 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb) return TC_ACT_SHOT; } - if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF) { - bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x\n", + if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF || + !(key.tunnel_flags & TUNNEL_KEY) || + (key.tunnel_flags & TUNNEL_CSUM)) { + bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x flags 0x%x\n", key.tunnel_id, key.local_ipv4, - key.remote_ipv4, md.gbp); + key.remote_ipv4, md.gbp, + bpf_ntohs(key.tunnel_flags)); log_err(ret); return TC_ACT_SHOT; } @@ -541,16 +546,19 @@ int ip6vxlan_get_tunnel_src(struct __sk_buff *skb) } ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), - BPF_F_TUNINFO_IPV6); + BPF_F_TUNINFO_IPV6 | BPF_F_TUNINFO_FLAGS); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; } - if (bpf_ntohl(key.local_ipv6[3]) != *local_ip) { - bpf_printk("ip6vxlan key %d local ip6 ::%x remote ip6 ::%x label 0x%x\n", + if (bpf_ntohl(key.local_ipv6[3]) != *local_ip || + !(key.tunnel_flags & TUNNEL_KEY) || + !(key.tunnel_flags & TUNNEL_CSUM)) { + bpf_printk("ip6vxlan key %d local ip6 ::%x remote ip6 ::%x label 0x%x flags 0x%x\n", key.tunnel_id, bpf_ntohl(key.local_ipv6[3]), - bpf_ntohl(key.remote_ipv6[3]), 
key.tunnel_label); + bpf_ntohl(key.remote_ipv6[3]), key.tunnel_label, + bpf_ntohs(key.tunnel_flags)); bpf_printk("local_ip 0x%x\n", *local_ip); log_err(ret); return TC_ACT_SHOT; -- cgit v1.2.3 From 0d68e6fe12ada8fbaf35f0978aaf18dfb8d2dbb5 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:08 +0200 Subject: selftests/xsk: Query for native XDP support Currently, xdpxceiver assumes that the underlying device supports XDP in native mode - this has been fine so far since the tests could only run on a veth pair. A future commit is going to allow running the test suite against physical devices, so let us query the device to see whether it is capable of running XDP programs in native mode. This way xdpxceiver will not try to run TEST_MODE_DRV if the device being tested does not support it. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-2-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 39 ++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 14b4737b223c..19f65bce7f65 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -99,6 +99,8 @@ #include #include "xsk.h" #include "xskxceiver.h" +#include +#include #include "../kselftest.h" /* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf. @@ -1712,10 +1714,40 @@ static void ifobject_delete(struct ifobject *ifobj) free(ifobj); } +static bool is_xdp_supported(struct ifobject *ifobject) +{ + int flags = XDP_FLAGS_DRV_MODE; + + LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags); + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), + BPF_EXIT_INSN() + }; + int ifindex = if_nametoindex(ifobject->ifname); + int prog_fd, insn_cnt = ARRAY_SIZE(insns); + int err; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); + if (prog_fd < 0) + return false; + + err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL); + if (err) { + close(prog_fd); + return false; + } + + bpf_xdp_detach(ifindex, flags, NULL); + close(prog_fd); + + return true; +} + int main(int argc, char **argv) { struct pkt_stream *pkt_stream_default; struct ifobject *ifobj_tx, *ifobj_rx; + int modes = TEST_MODE_SKB + 1; u32 i, j, failed_tests = 0; struct test_spec test; @@ -1743,15 +1775,18 @@ int main(int argc, char **argv) init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, worker_testapp_validate_rx); + if (is_xdp_supported(ifobj_tx)) + modes++; + test_spec_init(&test, ifobj_tx, ifobj_rx, 0); pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); if (!pkt_stream_default) exit_with_error(ENOMEM); test.pkt_stream_default = pkt_stream_default; - ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX); + ksft_set_plan(modes * TEST_TYPE_MAX); - for (i = 0; i < TEST_MODE_MAX; i++) + for (i = 0; i < modes; i++) for (j = 0; j < TEST_TYPE_MAX; j++) { test_spec_init(&test, ifobj_tx, ifobj_rx, i); run_pkt_test(&test, i, j); -- cgit v1.2.3 From 1adef0643b7df1069a53f6f5b7bc66c8234db899 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:09 +0200 Subject: selftests/xsk: Introduce default Rx pkt stream In order to prepare xdpxceiver for physical device testing, let us introduce a default Rx pkt stream.
Reason for doing it is that physical device testing will use a UMEM with a doubled size where half of it will be used by Tx and other half by Rx. This means that pkt addresses will differ for Tx and Rx streams. Rx thread will initialize the xsk_umem_info::base_addr that is added here so that pkt_set(), when working on Rx UMEM will add this offset and second half of UMEM space will be used. Note that currently base_addr is 0 on both sides. Future commit will do the mentioned initialization. Previously, veth based testing worked on separate UMEMs, so single default stream was fine. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-3-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 74 +++++++++++++++++++++----------- tools/testing/selftests/bpf/xskxceiver.h | 4 +- 2 files changed, 51 insertions(+), 27 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 19f65bce7f65..fbf65135fef6 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -433,15 +433,16 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, ifobj->use_poll = false; ifobj->use_fill_ring = true; ifobj->release_rx = true; - ifobj->pkt_stream = test->pkt_stream_default; ifobj->validation_func = NULL; if (i == 0) { ifobj->rx_on = false; ifobj->tx_on = true; + ifobj->pkt_stream = test->tx_pkt_stream_default; } else { ifobj->rx_on = true; ifobj->tx_on = false; + ifobj->pkt_stream = test->rx_pkt_stream_default; } memset(ifobj->umem, 0, sizeof(*ifobj->umem)); @@ -465,12 +466,15 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, enum test_mode mode) { - struct pkt_stream *pkt_stream; + struct pkt_stream *tx_pkt_stream; + struct pkt_stream *rx_pkt_stream; u32 i; - pkt_stream = test->pkt_stream_default; + tx_pkt_stream = test->tx_pkt_stream_default; + rx_pkt_stream = test->rx_pkt_stream_default; memset(test, 0, sizeof(*test)); - test->pkt_stream_default = pkt_stream; + test->tx_pkt_stream_default = tx_pkt_stream; + test->rx_pkt_stream_default = rx_pkt_stream; for (i = 0; i < MAX_INTERFACES; i++) { struct ifobject *ifobj = i ? 
ifobj_rx : ifobj_tx; @@ -531,16 +535,17 @@ static void pkt_stream_delete(struct pkt_stream *pkt_stream) static void pkt_stream_restore_default(struct test_spec *test) { struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; + struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream; - if (tx_pkt_stream != test->pkt_stream_default) { + if (tx_pkt_stream != test->tx_pkt_stream_default) { pkt_stream_delete(test->ifobj_tx->pkt_stream); - test->ifobj_tx->pkt_stream = test->pkt_stream_default; + test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; } - if (test->ifobj_rx->pkt_stream != test->pkt_stream_default && - test->ifobj_rx->pkt_stream != tx_pkt_stream) + if (rx_pkt_stream != test->rx_pkt_stream_default) { pkt_stream_delete(test->ifobj_rx->pkt_stream); - test->ifobj_rx->pkt_stream = test->pkt_stream_default; + test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; + } } static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) @@ -563,7 +568,7 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) { - pkt->addr = addr; + pkt->addr = addr + umem->base_addr; pkt->len = len; if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) pkt->valid = false; @@ -602,22 +607,29 @@ static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); test->ifobj_tx->pkt_stream = pkt_stream; + pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); test->ifobj_rx->pkt_stream = pkt_stream; } -static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) +static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, + int offset) { - struct xsk_umem_info *umem = test->ifobj_tx->umem; + struct xsk_umem_info *umem = ifobj->umem; struct pkt_stream *pkt_stream; u32 i; - pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default); - for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2) + pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); + for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size + offset, pkt_len); - test->ifobj_tx->pkt_stream = pkt_stream; - test->ifobj_rx->pkt_stream = pkt_stream; + ifobj->pkt_stream = pkt_stream; +} + +static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) +{ + __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); + __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); } static void pkt_stream_receive_half(struct test_spec *test) @@ -659,7 +671,8 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) return pkt; } -static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) +static void __pkt_stream_generate_custom(struct ifobject *ifobj, + struct pkt *pkts, u32 nb_pkts) { struct pkt_stream *pkt_stream; u32 i; @@ -668,15 +681,20 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, if (!pkt_stream) exit_with_error(ENOMEM); - test->ifobj_tx->pkt_stream = pkt_stream; - test->ifobj_rx->pkt_stream = pkt_stream; - for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].addr = pkts[i].addr; + pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; pkt_stream->pkts[i].len = pkts[i].len; pkt_stream->pkts[i].payload = i; pkt_stream->pkts[i].valid = pkts[i].valid; } + + ifobj->pkt_stream = 
pkt_stream; +} + +static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) +{ + __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts); + __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); } static void pkt_dump(void *pkt, u32 len) @@ -1745,7 +1763,8 @@ static bool is_xdp_supported(struct ifobject *ifobject) int main(int argc, char **argv) { - struct pkt_stream *pkt_stream_default; + struct pkt_stream *rx_pkt_stream_default; + struct pkt_stream *tx_pkt_stream_default; struct ifobject *ifobj_tx, *ifobj_rx; int modes = TEST_MODE_SKB + 1; u32 i, j, failed_tests = 0; @@ -1779,10 +1798,12 @@ int main(int argc, char **argv) modes++; test_spec_init(&test, ifobj_tx, ifobj_rx, 0); - pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); - if (!pkt_stream_default) + tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + if (!tx_pkt_stream_default || !rx_pkt_stream_default) exit_with_error(ENOMEM); - test.pkt_stream_default = pkt_stream_default; + test.tx_pkt_stream_default = tx_pkt_stream_default; + test.rx_pkt_stream_default = rx_pkt_stream_default; ksft_set_plan(modes * TEST_TYPE_MAX); @@ -1796,7 +1817,8 @@ int main(int argc, char **argv) failed_tests++; } - pkt_stream_delete(pkt_stream_default); + pkt_stream_delete(tx_pkt_stream_default); + pkt_stream_delete(rx_pkt_stream_default); ifobject_delete(ifobj_tx); ifobject_delete(ifobj_rx); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index ee97576757a9..8d1c31f127e7 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -99,6 +99,7 @@ struct xsk_umem_info { u32 frame_headroom; void *buffer; u32 frame_size; + u32 base_addr; bool unaligned_mode; }; @@ -159,7 +160,8 @@ struct ifobject { struct test_spec { struct ifobject *ifobj_tx; struct ifobject *ifobj_rx; - struct pkt_stream *pkt_stream_default; + struct pkt_stream *tx_pkt_stream_default; + struct pkt_stream *rx_pkt_stream_default; u16 total_steps; u16 current_step; u16 nb_sockets; -- cgit v1.2.3 From 24037ba7c47b1a50ceb70079d08fc9c135f7df4b Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:10 +0200 Subject: selftests/xsk: Increase chars for interface name to 16 So that "enp240s0f0" or such name can be used against xskxceiver. While at it, also extend character count for netns name. 
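The new value lines up with the kernel's own IFNAMSIZ limit of 16 bytes (15 visible characters plus the terminating NUL); a sanity check in that spirit, purely illustrative, would be:

#include <net/if.h>	/* IFNAMSIZ */
#include <string.h>

/* Returns non-zero when the name fits a kernel interface name;
 * "enp240s0f0" is 10 characters, well within the limit.
 */
static int ifname_fits(const char *name)
{
	return strnlen(name, IFNAMSIZ) < IFNAMSIZ;
}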
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-4-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/xskxceiver.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 8d1c31f127e7..04b298c72f67 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -29,8 +29,8 @@ #define TEST_FAILURE -1 #define TEST_CONTINUE 1 #define MAX_INTERFACES 2 -#define MAX_INTERFACE_NAME_CHARS 7 -#define MAX_INTERFACES_NAMESPACE_CHARS 10 +#define MAX_INTERFACE_NAME_CHARS 16 +#define MAX_INTERFACES_NAMESPACE_CHARS 16 #define MAX_SOCKETS 2 #define MAX_TEST_NAME_SIZE 32 #define MAX_TEARDOWN_ITER 10 -- cgit v1.2.3 From a693ff3ed5610a07b1b0dd831d10f516e13cf6c6 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:11 +0200 Subject: selftests/xsk: Add support for executing tests on physical device Currently, the architecture of xdpxceiver is designed strictly for conducting veth based tests. A veth pair is created together with a network namespace, and one of the veth interfaces is moved to that netns. Then, separate threads for Tx and Rx are spawned to utilize the described setup. The infrastructure described above cannot be used for testing AF_XDP support on physical devices. That testing will be conducted on a single network interface and the same queue. Xskxceiver needs to be extended to distinguish between veth tests and physical interface tests. Since the same iface/queue id pair will be used by both the Tx and Rx threads for physical device testing, the Tx thread, which happens to run after the Rx thread, is going to create its XSK socket with the shared umem flag. In order to track this setting throughout the lifetime of the spawned threads, introduce a 'shared_umem' boolean to struct ifobject and set it to true when xdpxceiver is run against a physical device. In that case, the UMEM size needs to be doubled, so half of it will be used by the Rx thread and the other half by the Tx thread. For two-step test types, the value of the XSKMAP element under key 0 has to be updated as there is now another socket for the second step. Also, to avoid race conditions when destroying XSK resources, move this activity to the main thread after the spawned Rx and Tx threads have finished their job. This way it is possible to gracefully remove the shared umem without introducing synchronization mechanisms. To run the xsk selftest suite on a physical device, append "-i $IFACE" when invoking test_xsk.sh. For veth based tests, simply skip it. When "-i $IFACE" is in place, under the hood test_xsk.sh will use $IFACE for both interfaces supplied to xdpxceiver, which in turn will interpret that this execution of the test suite is for a physical device. Note that currently this only makes it possible to test SKB and DRV mode (in case the underlying device has native XDP support). ZC testing support is added in a later patch.
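The shared-umem arrangement boils down to the sketch below, written against the xsk.h API carried by the selftests (interface name, queue id and error handling are illustrative):

#include <linux/if_xdp.h>
#include "xsk.h"

static int create_shared_pair(struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      struct xsk_socket **rx_xsk,
			      struct xsk_socket **tx_xsk)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_USE_NEED_WAKEUP,
	};
	int err;

	/* The Rx socket comes first and owns the UMEM's rings. */
	err = xsk_socket__create(rx_xsk, "eth0", 0, umem, rx, NULL, &cfg);
	if (err)
		return err;

	/* The Tx socket reuses the UMEM on the same <iface, queue>;
	 * the second bind then carries XDP_SHARED_UMEM under the hood.
	 */
	return xsk_socket__create_shared(tx_xsk, "eth0", 0, umem, NULL, tx,
					 fill, comp, &cfg);
}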
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-5-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/test_xsk.sh | 52 +++++--- tools/testing/selftests/bpf/xskxceiver.c | 204 ++++++++++++++++++++----------- tools/testing/selftests/bpf/xskxceiver.h | 1 + 3 files changed, 170 insertions(+), 87 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh index 096a957594cd..d821fd098504 100755 --- a/tools/testing/selftests/bpf/test_xsk.sh +++ b/tools/testing/selftests/bpf/test_xsk.sh @@ -73,14 +73,20 @@ # # Run and dump packet contents: # sudo ./test_xsk.sh -D +# +# Run test suite for physical device in loopback mode +# sudo ./test_xsk.sh -i IFACE . xsk_prereqs.sh -while getopts "vD" flag +ETH="" + +while getopts "vDi:" flag do case "${flag}" in v) verbose=1;; D) dump_pkts=1;; + i) ETH=${OPTARG};; esac done @@ -132,18 +138,25 @@ setup_vethPairs() { ip link set ${VETH0} up } -validate_root_exec -validate_veth_support ${VETH0} -validate_ip_utility -setup_vethPairs - -retval=$? -if [ $retval -ne 0 ]; then - test_status $retval "${TEST_NAME}" - cleanup_exit ${VETH0} ${VETH1} ${NS1} - exit $retval +if [ ! -z $ETH ]; then + VETH0=${ETH} + VETH1=${ETH} + NS1="" +else + validate_root_exec + validate_veth_support ${VETH0} + validate_ip_utility + setup_vethPairs + + retval=$? + if [ $retval -ne 0 ]; then + test_status $retval "${TEST_NAME}" + cleanup_exit ${VETH0} ${VETH1} ${NS1} + exit $retval + fi fi + if [[ $verbose -eq 1 ]]; then ARGS+="-v " fi @@ -152,26 +165,33 @@ if [[ $dump_pkts -eq 1 ]]; then ARGS="-D " fi +retval=$? test_status $retval "${TEST_NAME}" ## START TESTS statusList=() -TEST_NAME="XSK_SELFTESTS_SOFTIRQ" +TEST_NAME="XSK_SELFTESTS_${VETH0}_SOFTIRQ" exec_xskxceiver -cleanup_exit ${VETH0} ${VETH1} ${NS1} -TEST_NAME="XSK_SELFTESTS_BUSY_POLL" +if [ -z $ETH ]; then + cleanup_exit ${VETH0} ${VETH1} ${NS1} +fi +TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL" busy_poll=1 -setup_vethPairs +if [ -z $ETH ]; then + setup_vethPairs +fi exec_xskxceiver ## END TESTS -cleanup_exit ${VETH0} ${VETH1} ${NS1} +if [ -z $ETH ]; then + cleanup_exit ${VETH0} ${VETH1} ${NS1} +fi failures=0 echo -e "\nSummary:" diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index fbf65135fef6..b54b844cae89 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -301,8 +301,8 @@ static void enable_busy_poll(struct xsk_socket_info *xsk) exit_with_error(errno); } -static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, - struct ifobject *ifobject, bool shared) +static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, + struct ifobject *ifobject, bool shared) { struct xsk_socket_config cfg = {}; struct xsk_ring_cons *rxr; @@ -448,6 +448,9 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, memset(ifobj->umem, 0, sizeof(*ifobj->umem)); ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + if (ifobj->shared_umem && ifobj->rx_on) + ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * + XSK_UMEM__DEFAULT_FRAME_SIZE; for (j = 0; j < MAX_SOCKETS; j++) { memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); @@ -1146,6 +1149,70 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject) return TEST_PASS; } +static void 
xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, + struct xsk_umem_info *umem, bool tx) +{ + int i, ret; + + for (i = 0; i < test->nb_sockets; i++) { + bool shared = (ifobject->shared_umem && tx) ? true : !!i; + u32 ctr = 0; + + while (ctr++ < SOCK_RECONF_CTR) { + ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, + ifobject, shared); + if (!ret) + break; + + /* Retry if it fails as xsk_socket__create() is asynchronous */ + if (ctr >= SOCK_RECONF_CTR) + exit_with_error(-ret); + usleep(USLEEP_MAX); + } + if (ifobject->busy_poll) + enable_busy_poll(&ifobject->xsk_arr[i]); + } +} + +static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) +{ + xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); + ifobject->xsk = &ifobject->xsk_arr[0]; + ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd; + memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); +} + +static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) +{ + u32 idx = 0, i, buffers_to_fill; + int ret; + + if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) + buffers_to_fill = umem->num_frames; + else + buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; + + ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); + if (ret != buffers_to_fill) + exit_with_error(ENOSPC); + for (i = 0; i < buffers_to_fill; i++) { + u64 addr; + + if (pkt_stream->use_addr_for_fill) { + struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); + + if (!pkt) + break; + addr = pkt->addr; + } else { + addr = i * umem->frame_size; + } + + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + } + xsk_ring_prod__submit(&umem->fq, buffers_to_fill); +} + static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) { u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; @@ -1153,13 +1220,15 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) LIBBPF_OPTS(bpf_xdp_query_opts, opts); int ret, ifindex; void *bufs; - u32 i; ifobject->ns_fd = switch_namespace(ifobject->nsname); if (ifobject->umem->unaligned_mode) mmap_flags |= MAP_HUGETLB; + if (ifobject->shared_umem) + umem_sz *= 2; + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (bufs == MAP_FAILED) exit_with_error(errno); @@ -1168,24 +1237,9 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (ret) exit_with_error(-ret); - for (i = 0; i < test->nb_sockets; i++) { - u32 ctr = 0; - - while (ctr++ < SOCK_RECONF_CTR) { - ret = xsk_configure_socket(&ifobject->xsk_arr[i], ifobject->umem, - ifobject, !!i); - if (!ret) - break; - - /* Retry if it fails as xsk_socket__create() is asynchronous */ - if (ctr >= SOCK_RECONF_CTR) - exit_with_error(-ret); - usleep(USLEEP_MAX); - } + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); - if (ifobject->busy_poll) - enable_busy_poll(&ifobject->xsk_arr[i]); - } + xsk_configure_socket(test, ifobject, ifobject->umem, false); ifobject->xsk = &ifobject->xsk_arr[0]; @@ -1221,22 +1275,18 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) exit_with_error(-ret); } -static void testapp_cleanup_xsk_res(struct ifobject *ifobj) -{ - print_verbose("Destroying socket\n"); - xsk_socket__delete(ifobj->xsk->xsk); - munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size); - xsk_umem__delete(ifobj->umem->umem); -} - static void *worker_testapp_validate_tx(void *arg) { struct test_spec 
*test = (struct test_spec *)arg; struct ifobject *ifobject = test->ifobj_tx; int err; - if (test->current_step == 1) - thread_common_ops(test, ifobject); + if (test->current_step == 1) { + if (!ifobject->shared_umem) + thread_common_ops(test, ifobject); + else + thread_common_ops_tx(test, ifobject); + } print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, ifobject->ifname); @@ -1247,53 +1297,23 @@ static void *worker_testapp_validate_tx(void *arg) if (err) report_failure(test); - if (test->total_steps == test->current_step || err) - testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); } -static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) -{ - u32 idx = 0, i, buffers_to_fill; - int ret; - - if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) - buffers_to_fill = umem->num_frames; - else - buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; - - ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); - if (ret != buffers_to_fill) - exit_with_error(ENOSPC); - for (i = 0; i < buffers_to_fill; i++) { - u64 addr; - - if (pkt_stream->use_addr_for_fill) { - struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); - - if (!pkt) - break; - addr = pkt->addr; - } else { - addr = i * umem->frame_size; - } - - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; - } - xsk_ring_prod__submit(&umem->fq, buffers_to_fill); -} - static void *worker_testapp_validate_rx(void *arg) { struct test_spec *test = (struct test_spec *)arg; struct ifobject *ifobject = test->ifobj_rx; struct pollfd fds = { }; + int id = 0; int err; - if (test->current_step == 1) + if (test->current_step == 1) { thread_common_ops(test, ifobject); - - xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); + } else { + bpf_map_delete_elem(ifobject->xsk_map_fd, &id); + xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd); + } fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.events = POLLIN; @@ -1311,25 +1331,38 @@ static void *worker_testapp_validate_rx(void *arg) pthread_mutex_unlock(&pacing_mutex); } - if (test->total_steps == test->current_step || err) - testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); } +static void testapp_clean_xsk_umem(struct ifobject *ifobj) +{ + u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; + + if (ifobj->shared_umem) + umem_sz *= 2; + + xsk_umem__delete(ifobj->umem->umem); + munmap(ifobj->umem->buffer, umem_sz); +} + static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj, enum test_type type) { + bool old_shared_umem = ifobj->shared_umem; pthread_t t0; if (pthread_barrier_init(&barr, NULL, 2)) exit_with_error(errno); test->current_step++; - if (type == TEST_TYPE_POLL_RXQ_TMOUT) + if (type == TEST_TYPE_POLL_RXQ_TMOUT) pkt_stream_reset(ifobj->pkt_stream); pkts_in_flight = 0; - /*Spawn thread */ + test->ifobj_rx->shared_umem = false; + test->ifobj_tx->shared_umem = false; + + /* Spawn thread */ pthread_create(&t0, NULL, ifobj->func_ptr, test); if (type != TEST_TYPE_POLL_TXQ_TMOUT) @@ -1340,6 +1373,14 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct pthread_join(t0, NULL); + if (test->total_steps == test->current_step || test->fail) { + xsk_socket__delete(ifobj->xsk->xsk); + testapp_clean_xsk_umem(ifobj); + } + + test->ifobj_rx->shared_umem = old_shared_umem; + test->ifobj_tx->shared_umem = old_shared_umem; + return !!test->fail; } @@ -1369,6 +1410,14 @@ static int testapp_validate_traffic(struct test_spec *test) 
pthread_join(t1, NULL); pthread_join(t0, NULL); + if (test->total_steps == test->current_step || test->fail) { + xsk_socket__delete(ifobj_tx->xsk->xsk); + xsk_socket__delete(ifobj_rx->xsk->xsk); + testapp_clean_xsk_umem(ifobj_rx); + if (!ifobj_tx->shared_umem) + testapp_clean_xsk_umem(ifobj_tx); + } + return !!test->fail; } @@ -1448,9 +1497,9 @@ static void testapp_headroom(struct test_spec *test) static void testapp_stats_rx_dropped(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_DROPPED"); + pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; - pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); pkt_stream_receive_half(test); test->ifobj_rx->validation_func = validate_rx_dropped; testapp_validate_traffic(test); @@ -1573,6 +1622,11 @@ static void testapp_invalid_desc(struct test_spec *test) pkts[7].valid = false; } + if (test->ifobj_tx->shared_umem) { + pkts[4].addr += UMEM_SIZE; + pkts[5].addr += UMEM_SIZE; + } + pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); testapp_validate_traffic(test); pkt_stream_restore_default(test); @@ -1769,6 +1823,7 @@ int main(int argc, char **argv) int modes = TEST_MODE_SKB + 1; u32 i, j, failed_tests = 0; struct test_spec test; + bool shared_umem; /* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); @@ -1783,6 +1838,10 @@ int main(int argc, char **argv) setlocale(LC_ALL, ""); parse_command_line(ifobj_tx, ifobj_rx, argc, argv); + shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname); + + ifobj_tx->shared_umem = shared_umem; + ifobj_rx->shared_umem = shared_umem; if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { usage(basename(argv[0])); @@ -1819,6 +1878,9 @@ int main(int argc, char **argv) pkt_stream_delete(tx_pkt_stream_default); pkt_stream_delete(rx_pkt_stream_default); + free(ifobj_rx->umem); + if (!ifobj_tx->shared_umem) + free(ifobj_tx->umem); ifobject_delete(ifobj_tx); ifobject_delete(ifobj_rx); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 04b298c72f67..11f017785986 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -153,6 +153,7 @@ struct ifobject { bool busy_poll; bool use_fill_ring; bool release_rx; + bool shared_umem; u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; }; -- cgit v1.2.3 From c29fe883defcbc6cd16176787a2084b8e05dabf0 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:12 +0200 Subject: selftests/xsk: Make sure single threaded test terminates For single threaded poll tests call pthread_kill() from main thread so that we are sure worker thread has finished its job and it is possible to proceed with next test types from test suite. It was observed that on some platforms it takes a bit longer for worker thread to exit and next test case sees device as busy in this case. 
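The underlying pattern is plain signal-based thread termination; a generic illustration (not the patch itself) looks like:

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void *worker(void *arg)
{
	for (;;)
		pause();	/* stand-in for a blocking poll() */
	return NULL;
}

static void on_sigusr1(int signum)
{
	pthread_exit(NULL);	/* unwinds only the signalled thread */
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, on_sigusr1);
	pthread_create(&t, NULL, worker, NULL);
	pthread_kill(t, SIGUSR1);	/* ensure the worker stops */
	return pthread_join(t, NULL);
}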
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-6-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index b54b844cae89..74b21ddf5a98 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1345,6 +1345,11 @@ static void testapp_clean_xsk_umem(struct ifobject *ifobj) munmap(ifobj->umem->buffer, umem_sz); } +static void handler(int signum) +{ + pthread_exit(NULL); +} + static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj, enum test_type type) { @@ -1362,6 +1367,7 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct test->ifobj_rx->shared_umem = false; test->ifobj_tx->shared_umem = false; + signal(SIGUSR1, handler); /* Spawn thread */ pthread_create(&t0, NULL, ifobj->func_ptr, test); @@ -1371,6 +1377,7 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct if (pthread_barrier_destroy(&barr)) exit_with_error(errno); + pthread_kill(t0, SIGUSR1); pthread_join(t0, NULL); if (test->total_steps == test->current_step || test->fail) { -- cgit v1.2.3 From fe2ad08e1e1df77b6941916b87d4871d751b88b6 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 1 Sep 2022 13:48:13 +0200 Subject: selftests/xsk: Add support for zero copy testing Introduce new mode to xdpxceiver responsible for testing AF_XDP zero copy support of driver that serves underlying physical device. When setting up test suite, determine whether driver has ZC support or not by trying to bind XSK ZC socket to the interface. If it succeeded, interpret it as ZC support being in place and do softirq and busy poll tests for zero copy mode. Note that Rx dropped tests are skipped since ZC path is not touching rx_dropped stat at all. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901114813.16275-7-maciej.fijalkowski@intel.com --- tools/testing/selftests/bpf/xskxceiver.c | 76 ++++++++++++++++++++++++++++++-- tools/testing/selftests/bpf/xskxceiver.h | 2 + 2 files changed, 74 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 74b21ddf5a98..ef33309bbe49 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -124,9 +124,20 @@ static void __exit_with_error(int error, const char *file, const char *func, int } #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) - -#define mode_string(test) (test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV" #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? 
"BUSY-POLL " : "" +static char *mode_string(struct test_spec *test) +{ + switch (test->mode) { + case TEST_MODE_SKB: + return "SKB"; + case TEST_MODE_DRV: + return "DRV"; + case TEST_MODE_ZC: + return "ZC"; + default: + return "BOGUS"; + } +} static void report_failure(struct test_spec *test) { @@ -322,6 +333,51 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg); } +static bool ifobj_zc_avail(struct ifobject *ifobject) +{ + size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; + int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; + struct xsk_socket_info *xsk; + struct xsk_umem_info *umem; + bool zc_avail = false; + void *bufs; + int ret; + + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); + if (bufs == MAP_FAILED) + exit_with_error(errno); + + umem = calloc(1, sizeof(struct xsk_umem_info)); + if (!umem) { + munmap(bufs, umem_sz); + exit_with_error(-ENOMEM); + } + umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + ret = xsk_configure_umem(umem, bufs, umem_sz); + if (ret) + exit_with_error(-ret); + + xsk = calloc(1, sizeof(struct xsk_socket_info)); + if (!xsk) + goto out; + ifobject->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; + ifobject->xdp_flags |= XDP_FLAGS_DRV_MODE; + ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; + ifobject->rx_on = true; + xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; + ret = __xsk_configure_socket(xsk, umem, ifobject, false); + if (!ret) + zc_avail = true; + + xsk_socket__delete(xsk->xsk); + free(xsk); +out: + munmap(umem->buffer, umem_sz); + xsk_umem__delete(umem->umem); + free(umem); + return zc_avail; +} + static struct option long_options[] = { {"interface", required_argument, 0, 'i'}, {"busy-poll", no_argument, 0, 'b'}, @@ -488,9 +544,14 @@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, else ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE; - ifobj->bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; + ifobj->bind_flags = XDP_USE_NEED_WAKEUP; + if (mode == TEST_MODE_ZC) + ifobj->bind_flags |= XDP_ZEROCOPY; + else + ifobj->bind_flags |= XDP_COPY; } + test->mode = mode; __test_spec_init(test, ifobj_tx, ifobj_rx); } @@ -1664,6 +1725,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ { switch (type) { case TEST_TYPE_STATS_RX_DROPPED: + if (mode == TEST_MODE_ZC) { + ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); + return; + } testapp_stats_rx_dropped(test); break; case TEST_TYPE_STATS_TX_INVALID_DESCS: @@ -1860,8 +1925,11 @@ int main(int argc, char **argv) init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, worker_testapp_validate_rx); - if (is_xdp_supported(ifobj_tx)) + if (is_xdp_supported(ifobj_tx)) { modes++; + if (ifobj_zc_avail(ifobj_tx)) + modes++; + } test_spec_init(&test, ifobj_tx, ifobj_rx, 0); tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 11f017785986..edb76d2def9f 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -62,6 +62,7 @@ enum test_mode { TEST_MODE_SKB, TEST_MODE_DRV, + TEST_MODE_ZC, TEST_MODE_MAX }; @@ -167,6 +168,7 @@ struct test_spec { u16 current_step; u16 nb_sockets; bool fail; + enum test_mode mode; char name[MAX_TEST_NAME_SIZE]; }; -- cgit v1.2.3 From 
afef88e65554c3e8691513b8350d6445e292560e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20M=C3=BCller?= Date: Thu, 1 Sep 2022 22:22:53 +0000 Subject: selftests/bpf: Store BPF object files with .bpf.o extension MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BPF object files are, in a way, the final artifact produced as part of the ahead-of-time compilation process. That makes them somewhat special compared to "regular" object files, which are intermediate build artifacts that can typically be removed safely. As such, it can make sense to name them differently to make it easier to spot this difference at a glance. Among others, libbpf-bootstrap [0] has established the extension .bpf.o for BPF object files. It seems reasonable to follow this example and establish the same denomination for selftest build artifacts. To that end, this change adjusts the corresponding part of the build system and the test programs loading BPF object files to work with .bpf.o files. [0] https://github.com/libbpf/libbpf-bootstrap Suggested-by: Andrii Nakryiko Signed-off-by: Daniel Müller Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220901222253.1199242-1-deso@posteo.net --- tools/testing/selftests/bpf/Makefile | 36 +++++------ tools/testing/selftests/bpf/README.rst | 8 +-- tools/testing/selftests/bpf/get_cgroup_id_user.c | 2 +- .../testing/selftests/bpf/prog_tests/bpf_obj_id.c | 2 +- .../selftests/bpf/prog_tests/bpf_verif_scale.c | 54 ++++++++-------- tools/testing/selftests/bpf/prog_tests/btf.c | 4 +- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 6 +- .../testing/selftests/bpf/prog_tests/btf_endian.c | 2 +- .../selftests/bpf/prog_tests/connect_force_port.c | 2 +- .../testing/selftests/bpf/prog_tests/core_reloc.c | 74 +++++++++++----------- .../selftests/bpf/prog_tests/fexit_bpf2bpf.c | 44 ++++++------- .../selftests/bpf/prog_tests/get_stack_raw_tp.c | 4 +- .../testing/selftests/bpf/prog_tests/global_data.c | 2 +- .../selftests/bpf/prog_tests/global_data_init.c | 2 +- .../selftests/bpf/prog_tests/global_func_args.c | 2 +- tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 2 +- tools/testing/selftests/bpf/prog_tests/l4lb_all.c | 4 +- .../selftests/bpf/prog_tests/load_bytes_relative.c | 4 +- tools/testing/selftests/bpf/prog_tests/map_lock.c | 2 +- tools/testing/selftests/bpf/prog_tests/pinning.c | 4 +- .../testing/selftests/bpf/prog_tests/pkt_access.c | 2 +- .../selftests/bpf/prog_tests/pkt_md_access.c | 2 +- .../testing/selftests/bpf/prog_tests/probe_user.c | 2 +- .../selftests/bpf/prog_tests/queue_stack_map.c | 4 +- .../testing/selftests/bpf/prog_tests/rdonly_maps.c | 2 +- .../selftests/bpf/prog_tests/reference_tracking.c | 2 +- .../selftests/bpf/prog_tests/resolve_btfids.c | 2 +- .../selftests/bpf/prog_tests/select_reuseport.c | 4 +- tools/testing/selftests/bpf/prog_tests/sk_assign.c | 2 +- tools/testing/selftests/bpf/prog_tests/skb_ctx.c | 2 +- .../testing/selftests/bpf/prog_tests/skb_helpers.c | 2 +- .../selftests/bpf/prog_tests/sockopt_inherit.c | 2 +- .../selftests/bpf/prog_tests/sockopt_multi.c | 2 +- tools/testing/selftests/bpf/prog_tests/spinlock.c | 2 +- .../selftests/bpf/prog_tests/stacktrace_map.c | 2 +- .../bpf/prog_tests/stacktrace_map_raw_tp.c | 2 +- tools/testing/selftests/bpf/prog_tests/tailcalls.c | 36 +++++------ .../selftests/bpf/prog_tests/task_fd_query_rawtp.c | 2 +- .../selftests/bpf/prog_tests/task_fd_query_tp.c | 2 +- .../testing/selftests/bpf/prog_tests/tcp_estats.c | 2 +-
.../selftests/bpf/prog_tests/test_global_funcs.c | 34 +++++----- .../selftests/bpf/prog_tests/test_overhead.c | 2 +- .../selftests/bpf/prog_tests/tp_attach_query.c | 2 +- .../selftests/bpf/prog_tests/trampoline_count.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp.c | 2 +- .../selftests/bpf/prog_tests/xdp_adjust_frags.c | 2 +- .../selftests/bpf/prog_tests/xdp_adjust_tail.c | 10 +-- .../testing/selftests/bpf/prog_tests/xdp_attach.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_info.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_perf.c | 2 +- .../selftests/bpf/prog_tests/xdp_synproxy.c | 2 +- tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 8 +-- tools/testing/selftests/bpf/test_dev_cgroup.c | 2 +- tools/testing/selftests/bpf/test_lirc_mode2_user.c | 2 +- tools/testing/selftests/bpf/test_maps.c | 10 +-- tools/testing/selftests/bpf/test_offload.py | 22 +++---- tools/testing/selftests/bpf/test_skb_cgroup_id.sh | 2 +- tools/testing/selftests/bpf/test_sock_addr.c | 16 ++--- tools/testing/selftests/bpf/test_sockmap.c | 4 +- tools/testing/selftests/bpf/test_sysctl.c | 6 +- .../selftests/bpf/test_tcp_check_syncookie.sh | 2 +- tools/testing/selftests/bpf/test_tcpnotify_user.c | 2 +- tools/testing/selftests/bpf/test_xdp_redirect.sh | 8 +-- .../selftests/bpf/test_xdp_redirect_multi.sh | 2 +- tools/testing/selftests/bpf/test_xdp_veth.sh | 8 +-- tools/testing/selftests/bpf/xdp_redirect_multi.c | 2 +- tools/testing/selftests/bpf/xdp_synproxy.c | 2 +- tools/testing/selftests/bpf/xdping.c | 2 +- 68 files changed, 250 insertions(+), 250 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index eecad99f1735..c10adecb5a73 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -45,7 +45,7 @@ ifneq ($(BPF_GCC),) TEST_GEN_PROGS += test_progs-bpf_gcc endif -TEST_GEN_FILES = test_lwt_ip_encap.o test_tc_edt.o +TEST_GEN_FILES = test_lwt_ip_encap.bpf.o test_tc_edt.bpf.o TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c) # Order correspond to 'make run_tests' order @@ -358,17 +358,17 @@ LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c SKEL_BLACKLIST += $$(LSKELS) -test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o -linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o -linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o -linked_maps.skel.h-deps := linked_maps1.o linked_maps2.o +test_static_linked.skel.h-deps := test_static_linked1.bpf.o test_static_linked2.bpf.o +linked_funcs.skel.h-deps := linked_funcs1.bpf.o linked_funcs2.bpf.o +linked_vars.skel.h-deps := linked_vars1.bpf.o linked_vars2.bpf.o +linked_maps.skel.h-deps := linked_maps1.bpf.o linked_maps2.bpf.o # In the subskeleton case, we want the test_subskeleton_lib.subskel.h file # but that's created as a side-effect of the skel.h generation. 
-test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o -test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o -test_usdt.skel.h-deps := test_usdt.o test_usdt_multispec.o +test_subskeleton.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o test_subskeleton.bpf.o +test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o +test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o -LINKED_BPF_SRCS := $(patsubst %.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps))) +LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps))) # Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. @@ -386,7 +386,7 @@ TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \ TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES)) TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)) -TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)) +TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.bpf.o, $$(TRUNNER_BPF_SRCS)) TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \ $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\ $$(TRUNNER_BPF_SRCS))) @@ -416,7 +416,7 @@ endif # input/output directory combination ifeq ($($(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs),) $(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y -$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \ +$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \ $(TRUNNER_BPF_PROGS_DIR)/%.c \ $(TRUNNER_BPF_PROGS_DIR)/*.h \ $$(INCLUDE_DIR)/vmlinux.h \ @@ -426,25 +426,25 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \ $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ $(TRUNNER_BPF_CFLAGS)) -$(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT) +$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$< $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o) $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o) $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o) - $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@ - $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h) + $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$@ + $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$(@:.skel.h=.subskel.h) -$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT) +$(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked1.o) $$< $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked2.o) $$(<:.o=.llinked1.o) $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o) $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o) - $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@ + $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) - $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o)) + $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o)) $(Q)$$(BPFTOOL) gen 
object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps)) $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o) $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked3.o) $$(@:.skel.h=.linked2.o) @@ -500,7 +500,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ - $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ + $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool $(if $2,$2/)bpftool endef diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index eb1b7541f39d..d3c6b3da0bb1 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -126,11 +126,11 @@ available in 10.0.1. The patch is available in llvm 11.0.0 trunk. __ https://reviews.llvm.org/D78466 -bpf_verif_scale/loop6.o test failure with Clang 12 -================================================== +bpf_verif_scale/loop6.bpf.o test failure with Clang 12 +====================================================== With Clang 12, the following bpf_verif_scale test failed: - * ``bpf_verif_scale/loop6.o`` + * ``bpf_verif_scale/loop6.bpf.o`` The verifier output looks like @@ -245,7 +245,7 @@ See `kernel llvm reloc`_ for more explanation and some examples. Using clang 13 to compile old libbpf which has static linker support, there will be a compilation failure:: - libbpf: ELF relo #0 in section #6 has unexpected type 2 in .../bpf_tcp_nogpl.o + libbpf: ELF relo #0 in section #6 has unexpected type 2 in .../bpf_tcp_nogpl.bpf.o Here, ``type 2`` refers to new relocation type ``R_BPF_64_ABS64``. To fix this issue, user newer libbpf. 
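On the test-program side the rename is mechanical: every hardcoded object path gains the .bpf.o suffix while the libbpf open/load flow itself is unchanged, as the per-file hunks below show. A minimal sketch of that flow, assuming a hypothetical test_foo.bpf.o object (the selftests themselves typically go through bpf_prog_test_load() or a generated skeleton rather than open-coding this):

#include <bpf/libbpf.h>

/* Illustrative sketch only: open and load a BPF object file under the
 * new .bpf.o naming convention. "test_foo.bpf.o" is a hypothetical name.
 */
static int open_and_load_bpf_obj(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("test_foo.bpf.o", NULL);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	err = bpf_object__load(obj);
	bpf_object__close(obj);
	return err;
}

The Makefile side is the same rename applied to the build rules, e.g. the TRUNNER_BPF_OBJS pattern rule above now produces %.bpf.o instead of %.o.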
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c index e021cc67dc02..156743cf5870 100644 --- a/tools/testing/selftests/bpf/get_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c @@ -48,7 +48,7 @@ static int bpf_find_map(const char *test, struct bpf_object *obj, int main(int argc, char **argv) { const char *probe_name = "syscalls/sys_enter_nanosleep"; - const char *file = "get_cgroup_id_kern.o"; + const char *file = "get_cgroup_id_kern.bpf.o"; int err, bytes, efd, prog_fd, pmu_fd; int cgroup_fd, cgidmap_fd, pidmap_fd; struct perf_event_attr attr = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index dbe56fa8582d..e1c1e521cca2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -7,7 +7,7 @@ void serial_test_bpf_obj_id(void) { const __u64 array_magic_value = 0xfaceb00c; const __u32 array_key = 0; - const char *file = "./test_obj_id.o"; + const char *file = "./test_obj_id.bpf.o"; const char *expected_prog_name = "test_obj_id"; const char *expected_map_name = "test_map_id"; const __u64 nsec_per_sec = 1000000000; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index ff6cce9fef06..5ca252823294 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -75,45 +75,45 @@ static void scale_test(const char *file, void test_verif_scale1() { - scale_test("test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS, false); + scale_test("test_verif_scale1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); } void test_verif_scale2() { - scale_test("test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS, false); + scale_test("test_verif_scale2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); } void test_verif_scale3() { - scale_test("test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS, false); + scale_test("test_verif_scale3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); } void test_verif_scale_pyperf_global() { - scale_test("pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf_global.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf_subprogs() { - scale_test("pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf50() { /* full unroll by llvm */ - scale_test("pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf50.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf100() { /* full unroll by llvm */ - scale_test("pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf100.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf180() { /* full unroll by llvm */ - scale_test("pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf180.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf600() @@ -124,13 +124,13 @@ void test_verif_scale_pyperf600() * 16k insns in loop body. * Total of 5 such loops. Total program size ~82k insns. 
*/ - scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf600.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf600_bpf_loop(void) { /* use the bpf_loop helper*/ - scale_test("pyperf600_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf600_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_pyperf600_nounroll() @@ -141,37 +141,37 @@ void test_verif_scale_pyperf600_nounroll() * ~110 insns in loop body. * Total of 5 such loops. Total program size ~1500 insns. */ - scale_test("pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_loop1() { - scale_test("loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_loop2() { - scale_test("loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("loop2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_loop3_fail() { - scale_test("loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */); + scale_test("loop3.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */); } void test_verif_scale_loop4() { - scale_test("loop4.o", BPF_PROG_TYPE_SCHED_CLS, false); + scale_test("loop4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); } void test_verif_scale_loop5() { - scale_test("loop5.o", BPF_PROG_TYPE_SCHED_CLS, false); + scale_test("loop5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); } void test_verif_scale_loop6() { - scale_test("loop6.o", BPF_PROG_TYPE_KPROBE, false); + scale_test("loop6.bpf.o", BPF_PROG_TYPE_KPROBE, false); } void test_verif_scale_strobemeta() @@ -180,54 +180,54 @@ void test_verif_scale_strobemeta() * Total program size 20.8k insn. 
* ~350k processed_insns */ - scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("strobemeta.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_strobemeta_bpf_loop(void) { /* use the bpf_loop helper*/ - scale_test("strobemeta_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("strobemeta_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_strobemeta_nounroll1() { /* no unroll, tiny loops */ - scale_test("strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("strobemeta_nounroll1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_strobemeta_nounroll2() { /* no unroll, tiny loops */ - scale_test("strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("strobemeta_nounroll2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_strobemeta_subprogs() { /* non-inlined subprogs */ - scale_test("strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); + scale_test("strobemeta_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } void test_verif_scale_sysctl_loop1() { - scale_test("test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); + scale_test("test_sysctl_loop1.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); } void test_verif_scale_sysctl_loop2() { - scale_test("test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); + scale_test("test_sysctl_loop2.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); } void test_verif_scale_xdp_loop() { - scale_test("test_xdp_loop.o", BPF_PROG_TYPE_XDP, false); + scale_test("test_xdp_loop.bpf.o", BPF_PROG_TYPE_XDP, false); } void test_verif_scale_seg6_loop() { - scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); + scale_test("test_seg6_loop.bpf.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); } void test_verif_twfw() { - scale_test("twfw.o", BPF_PROG_TYPE_CGROUP_SKB, false); + scale_test("twfw.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, false); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index ef6528b8084c..127b8caa3dc1 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4651,8 +4651,8 @@ struct btf_file_test { }; static struct btf_file_test file_tests[] = { - { .file = "test_btf_newkv.o", }, - { .file = "test_btf_nokv.o", .btf_kv_notfound = true, }, + { .file = "test_btf_newkv.bpf.o", }, + { .file = "test_btf_nokv.bpf.o", .btf_kv_notfound = true, }, }; static void do_test_file(unsigned int test_num) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 7b5bbe21b549..b1ca954ed1e5 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -52,7 +52,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t) int err = 0, fd = -1; FILE *f = NULL; - snprintf(test_file, sizeof(test_file), "%s.o", t->file); + snprintf(test_file, sizeof(test_file), "%s.bpf.o", t->file); btf = btf__parse_elf(test_file, NULL); if (!ASSERT_OK_PTR(btf, "btf_parse_elf")) { @@ -841,8 +841,8 @@ static void test_btf_dump_datasec_data(char *str) char license[4] = "GPL"; struct btf_dump *d; - btf = btf__parse("xdping_kern.o", NULL); - if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found")) + btf = btf__parse("xdping_kern.bpf.o", NULL); + if (!ASSERT_OK_PTR(btf, "xdping_kern.bpf.o BTF not found")) return; d = btf_dump__new(btf, btf_dump_snprintf, str, NULL); diff --git 
a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c index 8afbf3d0b89a..5b9f84dbeb43 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -23,7 +23,7 @@ void test_btf_endian() { int var_id; /* Load BTF in native endianness */ - btf = btf__parse_elf("btf_dump_test_case_syntax.o", NULL); + btf = btf__parse_elf("btf_dump_test_case_syntax.bpf.o", NULL); if (!ASSERT_OK_PTR(btf, "parse_native_btf")) goto err_out; diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c index 9c4325f4aef2..24d553109f8d 100644 --- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -53,7 +53,7 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type) __u16 expected_peer_port = 60000; struct bpf_program *prog; struct bpf_object *obj; - const char *obj_file = v4 ? "connect_force_port4.o" : "connect_force_port6.o"; + const char *obj_file = v4 ? "connect_force_port4.bpf.o" : "connect_force_port6.bpf.o"; int fd, err; __u32 duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index c8655ba9a88f..47f42e680105 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -13,7 +13,7 @@ static int duration = 0; #define MODULES_CASE(name, pg_name, tp_name) { \ .case_name = name, \ - .bpf_obj_file = "test_core_reloc_module.o", \ + .bpf_obj_file = "test_core_reloc_module.bpf.o", \ .btf_src_file = NULL, /* find in kernel module BTFs */ \ .input = "", \ .input_len = 0, \ @@ -43,8 +43,8 @@ static int duration = 0; #define FLAVORS_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_flavors.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_flavors.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_flavors" \ @@ -68,8 +68,8 @@ static int duration = 0; #define NESTING_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_nesting.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_nesting.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_nesting" \ @@ -96,8 +96,8 @@ static int duration = 0; #define ARRAYS_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_arrays.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_arrays.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_arrays" \ @@ -130,8 +130,8 @@ static int duration = 0; #define PRIMITIVES_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_primitives.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_primitives.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_primitives" \ @@ -150,8 +150,8 @@ static int duration = 0; #define MODS_CASE(name) { \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_mods.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_mods.bpf.o", \ + 
.btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \ .a = 1, \ .b = 2, \ @@ -174,8 +174,8 @@ static int duration = 0; #define PTR_AS_ARR_CASE(name) { \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_ptr_as_arr.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_ptr_as_arr.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .input = (const char *)&(struct core_reloc_##name []){ \ { .a = 1 }, \ { .a = 2 }, \ @@ -203,8 +203,8 @@ static int duration = 0; #define INTS_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_ints.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_ints.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_ints" @@ -223,18 +223,18 @@ static int duration = 0; #define FIELD_EXISTS_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_existence.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_existence.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_existence" #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \ .case_name = test_name_prefix#name, \ .bpf_obj_file = objfile, \ - .btf_src_file = "btf__core_reloc_" #name ".o" + .btf_src_file = "btf__core_reloc_" #name ".bpf.o" #define BITFIELDS_CASE(name, ...) { \ - BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \ "probed:", name), \ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ .input_len = sizeof(struct core_reloc_##name), \ @@ -244,7 +244,7 @@ static int duration = 0; .raw_tp_name = "sys_enter", \ .prog_name = "test_core_bitfields", \ }, { \ - BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \ "direct:", name), \ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ .input_len = sizeof(struct core_reloc_##name), \ @@ -256,14 +256,14 @@ static int duration = 0; #define BITFIELDS_ERR_CASE(name) { \ - BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \ "probed:", name), \ .fails = true, \ - .run_btfgen_fails = true, \ + .run_btfgen_fails = true, \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_bitfields", \ }, { \ - BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \ "direct:", name), \ .fails = true, \ .run_btfgen_fails = true, \ @@ -272,8 +272,8 @@ static int duration = 0; #define SIZE_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_size.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_size.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_size" @@ -307,13 +307,13 @@ static int duration = 0; #define SIZE_ERR_CASE(name) { \ SIZE_CASE_COMMON(name), \ .fails = true, \ - .run_btfgen_fails = true, \ + .run_btfgen_fails = true, \ } #define TYPE_BASED_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_type_based.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_type_based.bpf.o", \ + .btf_src_file 
= "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_type_based" @@ -331,8 +331,8 @@ static int duration = 0; #define TYPE_ID_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_type_id.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_type_id.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_type_id" @@ -350,8 +350,8 @@ static int duration = 0; #define ENUMVAL_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_enumval.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_enumval.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_enumval" @@ -369,8 +369,8 @@ static int duration = 0; #define ENUM64VAL_CASE_COMMON(name) \ .case_name = #name, \ - .bpf_obj_file = "test_core_reloc_enum64val.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ + .bpf_obj_file = "test_core_reloc_enum64val.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ .raw_tp_name = "sys_enter", \ .prog_name = "test_core_enum64val" @@ -547,7 +547,7 @@ static const struct core_reloc_test_case test_cases[] = { /* validate we can find kernel image and use its BTF for relocs */ { .case_name = "kernel", - .bpf_obj_file = "test_core_reloc_kernel.o", + .bpf_obj_file = "test_core_reloc_kernel.bpf.o", .btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */ .input = "", .input_len = 0, @@ -629,8 +629,8 @@ static const struct core_reloc_test_case test_cases[] = { /* validate edge cases of capturing relocations */ { .case_name = "misc", - .bpf_obj_file = "test_core_reloc_misc.o", - .btf_src_file = "btf__core_reloc_misc.o", + .bpf_obj_file = "test_core_reloc_misc.bpf.o", + .btf_src_file = "btf__core_reloc_misc.bpf.o", .input = (const char *)&(struct core_reloc_misc_extensible[]){ { .a = 1 }, { .a = 2 }, /* not read */ diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index da860b07abb5..d1e32e792536 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -174,8 +174,8 @@ static void test_target_no_callees(void) const char *prog_name[] = { "fexit/test_pkt_md_access", }; - test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.o", - "./test_pkt_md_access.o", + test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.bpf.o", + "./test_pkt_md_access.bpf.o", ARRAY_SIZE(prog_name), prog_name, true, NULL); } @@ -188,8 +188,8 @@ static void test_target_yes_callees(void) "fexit/test_pkt_access_subprog2", "fexit/test_pkt_access_subprog3", }; - test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o", - "./test_pkt_access.o", + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o", + "./test_pkt_access.bpf.o", ARRAY_SIZE(prog_name), prog_name, true, NULL); } @@ -206,8 +206,8 @@ static void test_func_replace(void) "freplace/get_constant", "freplace/test_pkt_write_access_subprog", }; - test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o", - "./test_pkt_access.o", + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o", + "./test_pkt_access.bpf.o", ARRAY_SIZE(prog_name), prog_name, true, NULL); } @@ -217,8 +217,8 @@ static void test_func_replace_verify(void) const char *prog_name[] = { "freplace/do_bind", }; - test_fexit_bpf2bpf_common("./freplace_connect4.o", - "./connect4_prog.o", + 
test_fexit_bpf2bpf_common("./freplace_connect4.bpf.o", + "./connect4_prog.bpf.o", ARRAY_SIZE(prog_name), prog_name, false, NULL); } @@ -227,7 +227,7 @@ static int test_second_attach(struct bpf_object *obj) { const char *prog_name = "security_new_get_constant"; const char *tgt_name = "get_constant"; - const char *tgt_obj_file = "./test_pkt_access.o"; + const char *tgt_obj_file = "./test_pkt_access.bpf.o"; struct bpf_program *prog = NULL; struct bpf_object *tgt_obj; struct bpf_link *link; @@ -272,8 +272,8 @@ static void test_func_replace_multi(void) const char *prog_name[] = { "freplace/get_constant", }; - test_fexit_bpf2bpf_common("./freplace_get_constant.o", - "./test_pkt_access.o", + test_fexit_bpf2bpf_common("./freplace_get_constant.bpf.o", + "./test_pkt_access.bpf.o", ARRAY_SIZE(prog_name), prog_name, true, test_second_attach); } @@ -281,10 +281,10 @@ static void test_func_replace_multi(void) static void test_fmod_ret_freplace(void) { struct bpf_object *freplace_obj = NULL, *pkt_obj, *fmod_obj = NULL; - const char *freplace_name = "./freplace_get_constant.o"; - const char *fmod_ret_name = "./fmod_ret_freplace.o"; + const char *freplace_name = "./freplace_get_constant.bpf.o"; + const char *fmod_ret_name = "./fmod_ret_freplace.bpf.o"; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); - const char *tgt_name = "./test_pkt_access.o"; + const char *tgt_name = "./test_pkt_access.bpf.o"; struct bpf_link *freplace_link = NULL; struct bpf_program *prog; __u32 duration = 0; @@ -339,8 +339,8 @@ static void test_func_sockmap_update(void) const char *prog_name[] = { "freplace/cls_redirect", }; - test_fexit_bpf2bpf_common("./freplace_cls_redirect.o", - "./test_cls_redirect.o", + test_fexit_bpf2bpf_common("./freplace_cls_redirect.bpf.o", + "./test_cls_redirect.bpf.o", ARRAY_SIZE(prog_name), prog_name, false, NULL); } @@ -385,15 +385,15 @@ close_prog: static void test_func_replace_return_code(void) { /* test invalid return code in the replaced program */ - test_obj_load_failure_common("./freplace_connect_v4_prog.o", - "./connect4_prog.o"); + test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o", + "./connect4_prog.bpf.o"); } static void test_func_map_prog_compatibility(void) { /* test with spin lock map value in the replaced program */ - test_obj_load_failure_common("./freplace_attach_probe.o", - "./test_attach_probe.o"); + test_obj_load_failure_common("./freplace_attach_probe.bpf.o", + "./test_attach_probe.bpf.o"); } static void test_func_replace_global_func(void) @@ -402,8 +402,8 @@ static void test_func_replace_global_func(void) "freplace/test_pkt_access", }; - test_fexit_bpf2bpf_common("./freplace_global_func.o", - "./test_pkt_access.o", + test_fexit_bpf2bpf_common("./freplace_global_func.bpf.o", + "./test_pkt_access.bpf.o", ARRAY_SIZE(prog_name), prog_name, false, NULL); } diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index 16048978a1ef..858e0575f502 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -84,8 +84,8 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) void test_get_stack_raw_tp(void) { - const char *file = "./test_get_stack_rawtp.o"; - const char *file_err = "./test_get_stack_rawtp_err.o"; + const char *file = "./test_get_stack_rawtp.bpf.o"; + const char *file_err = "./test_get_stack_rawtp_err.bpf.o"; const char *prog_name = "bpf_prog1"; int i, err, prog_fd, exp_cnt = 
MAX_CNT_RAWTP; struct perf_buffer *pb = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 027685858925..fadfb64e2a71 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -131,7 +131,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) void test_global_data(void) { - const char *file = "./test_global_data.o"; + const char *file = "./test_global_data.bpf.o"; struct bpf_object *obj; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c index 57331c606964..8466332d7406 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -3,7 +3,7 @@ void test_global_data_init(void) { - const char *file = "./test_global_data.o"; + const char *file = "./test_global_data.bpf.o"; int err = -ENOMEM, map_fd, zero = 0; __u8 *buff = NULL, *newval = NULL; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c index 29039a36cce5..d997099f62d0 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_func_args.c +++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c @@ -39,7 +39,7 @@ static void test_global_func_args0(struct bpf_object *obj) void test_global_func_args(void) { - const char *file = "./test_global_func_args.o"; + const char *file = "./test_global_func_args.bpf.o"; struct bpf_object *obj; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 1cee6957285e..73579370bfbd 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -69,7 +69,7 @@ void serial_test_kfree_skb(void) const int zero = 0; bool test_ok[2]; - err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 55f733ff4109..9c1a18573ffd 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -90,7 +90,7 @@ out: void test_l4lb_all(void) { if (test__start_subtest("l4lb_inline")) - test_l4lb("test_l4lb.o"); + test_l4lb("test_l4lb.bpf.o"); if (test__start_subtest("l4lb_noinline")) - test_l4lb("test_l4lb_noinline.o"); + test_l4lb("test_l4lb_noinline.bpf.o"); } diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c index 4e0b2ec057aa..581c0eb0a0a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c +++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c @@ -27,8 +27,8 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; - err = bpf_prog_test_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB, - &obj, &prog_fd); + err = bpf_prog_test_load("./load_bytes_relative.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, + &obj, &prog_fd); if (CHECK_FAIL(err)) goto 
close_server_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index e4e99b37df64..1d6726f01dd2 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -49,7 +49,7 @@ out: void test_map_lock(void) { - const char *file = "./test_map_lock.o"; + const char *file = "./test_map_lock.bpf.o"; int prog_fd, map_fd[2], vars[17] = {}; pthread_t thread_id[6]; struct bpf_object *obj = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index 31c09ba577eb..d95cee5867b7 100644 --- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -26,13 +26,13 @@ __u32 get_map_id(struct bpf_object *obj, const char *name) void test_pinning(void) { - const char *file_invalid = "./test_pinning_invalid.o"; + const char *file_invalid = "./test_pinning_invalid.bpf.o"; const char *custpinpath = "/sys/fs/bpf/custom/pinmap"; const char *nopinpath = "/sys/fs/bpf/nopinmap"; const char *nopinpath2 = "/sys/fs/bpf/nopinmap2"; const char *custpath = "/sys/fs/bpf/custom"; const char *pinpath = "/sys/fs/bpf/pinmap"; - const char *file = "./test_pinning.o"; + const char *file = "./test_pinning.bpf.o"; __u32 map_id, map_id2, duration = 0; struct stat statbuf = {}; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c index 0bcccdc34fbc..682e4ff45b01 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -4,7 +4,7 @@ void test_pkt_access(void) { - const char *file = "./test_pkt_access.o"; + const char *file = "./test_pkt_access.bpf.o"; struct bpf_object *obj; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c index 00ee1dd792aa..0d85e0642811 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -4,7 +4,7 @@ void test_pkt_md_access(void) { - const char *file = "./test_pkt_md_access.o"; + const char *file = "./test_pkt_md_access.bpf.o"; struct bpf_object *obj; int err, prog_fd; LIBBPF_OPTS(bpf_test_run_opts, topts, diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c index 34dbd2adc157..8721671321de 100644 --- a/tools/testing/selftests/bpf/prog_tests/probe_user.c +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -11,7 +11,7 @@ void serial_test_probe_user(void) #endif }; enum { prog_count = ARRAY_SIZE(prog_names) }; - const char *obj_file = "./test_probe_user.o"; + const char *obj_file = "./test_probe_user.bpf.o"; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); int err, results_map_fd, sock_fd, duration = 0; struct sockaddr curr, orig, tmp; diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index d2743fc10032..722c5f2a7776 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -28,9 +28,9 @@ static void test_queue_stack_map_by_type(int type) vals[i] = rand(); if (type == QUEUE) - strncpy(file, "./test_queue_map.o", sizeof(file)); + strncpy(file, "./test_queue_map.bpf.o", sizeof(file)); else 
if (type == STACK) - strncpy(file, "./test_stack_map.o", sizeof(file)); + strncpy(file, "./test_stack_map.bpf.o", sizeof(file)); else return; diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c index fd5d2ddfb062..19e2f2526dbd 100644 --- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -16,7 +16,7 @@ struct rdonly_map_subtest { void test_rdonly_maps(void) { - const char *file = "test_rdonly_maps.o"; + const char *file = "test_rdonly_maps.bpf.o"; struct rdonly_map_subtest subtests[] = { { "skip loop", "skip_loop", 0, 0 }, { "part loop", "part_loop", 3, 2 + 3 + 4 }, diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c index 739d2ea6ca55..d863205bbe95 100644 --- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c +++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -3,7 +3,7 @@ void test_reference_tracking(void) { - const char *file = "test_sk_lookup_kern.o"; + const char *file = "test_sk_lookup_kern.bpf.o"; const char *obj_name = "ref_track"; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = obj_name, diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index c197261d02e2..f81d08d429a2 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -101,7 +101,7 @@ static int resolve_symbols(void) int type_id; __u32 nr; - btf = btf__parse_elf("btf_data.o", NULL); + btf = btf__parse_elf("btf_data.bpf.o", NULL); if (CHECK(libbpf_get_error(btf), "resolve", "Failed to load BTF from btf_data.o\n")) return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 1cbd8cd64044..64c5f5eb2994 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -91,9 +91,9 @@ static int prepare_bpf_obj(void) struct bpf_map *map; int err; - obj = bpf_object__open("test_select_reuseport_kern.o"); + obj = bpf_object__open("test_select_reuseport_kern.bpf.o"); err = libbpf_get_error(obj); - RET_ERR(err, "open test_select_reuseport_kern.o", + RET_ERR(err, "open test_select_reuseport_kern.bpf.o", "obj:%p PTR_ERR(obj):%d\n", obj, err); map = bpf_object__find_map_by_name(obj, "outer_map"); diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 1d272e05188e..3e190ed63976 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -47,7 +47,7 @@ configure_stack(void) if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) return false; sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", - "direct-action object-file ./test_sk_assign.o", + "direct-action object-file ./test_sk_assign.bpf.o", "section tc", (env.verbosity < VERBOSE_VERY) ? 
" 2>/dev/null" : "verbose"); if (CHECK(system(tc_cmd), "BPF load failed;", diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index ce0e555b5e38..33f950e2dae3 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -31,7 +31,7 @@ void test_skb_ctx(void) struct bpf_object *obj; int err, prog_fd, i; - err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("./test_skb_ctx.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (!ASSERT_OK(err, "load")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c index 97dc8b14be48..f7ee25f290f7 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c @@ -20,7 +20,7 @@ void test_skb_helpers(void) struct bpf_object *obj; int err, prog_fd; - err = bpf_prog_test_load("./test_skb_helpers.o", + err = bpf_prog_test_load("./test_skb_helpers.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (!ASSERT_OK(err, "load")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 8ed78a9383ba..c5cb6e8374b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -174,7 +174,7 @@ static void run_test(int cgroup_fd) pthread_t tid; int err; - obj = bpf_object__open_file("sockopt_inherit.o", NULL); + obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL); if (!ASSERT_OK_PTR(obj, "obj_open")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index abce12ddcc37..28d592dc54a7 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -310,7 +310,7 @@ void test_sockopt_multi(void) if (CHECK_FAIL(cg_child < 0)) goto out; - obj = bpf_object__open_file("sockopt_multi.o", NULL); + obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL); if (!ASSERT_OK_PTR(obj, "obj_load")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 8e329eaee6d7..15eb1372d771 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -19,7 +19,7 @@ static void *spin_lock_thread(void *arg) void test_spinlock(void) { - const char *file = "./test_spin_lock.o"; + const char *file = "./test_spin_lock.bpf.o"; pthread_t thread_id[4]; struct bpf_object *obj = NULL; int prog_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c index 313f0a66232e..df59e4ae2951 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c @@ -6,7 +6,7 @@ void test_stacktrace_map(void) int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; const char *prog_name = "oncpu"; int err, prog_fd, stack_trace_len; - const char *file = "./test_stacktrace_map.o"; + const char *file = "./test_stacktrace_map.bpf.o"; __u32 key, val, duration = 0; struct bpf_program *prog; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c 
b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c index 1cb8dd36bd8f..c6ef06f55cdb 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c @@ -5,7 +5,7 @@ void test_stacktrace_map_raw_tp(void) { const char *prog_name = "oncpu"; int control_map_fd, stackid_hmap_fd, stackmap_fd; - const char *file = "./test_stacktrace_map.o"; + const char *file = "./test_stacktrace_map.bpf.o"; __u32 key, val, duration = 0; int err, prog_fd; struct bpf_program *prog; diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 19c70880cfb3..58fe2c586ed7 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -20,8 +20,8 @@ static void test_tailcall_1(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, - &prog_fd); + err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); if (CHECK_FAIL(err)) return; @@ -156,8 +156,8 @@ static void test_tailcall_2(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, - &prog_fd); + err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); if (CHECK_FAIL(err)) return; @@ -299,7 +299,7 @@ out: */ static void test_tailcall_3(void) { - test_tailcall_count("tailcall3.o"); + test_tailcall_count("tailcall3.bpf.o"); } /* test_tailcall_6 checks that the count value of the tail call limit @@ -307,7 +307,7 @@ static void test_tailcall_3(void) */ static void test_tailcall_6(void) { - test_tailcall_count("tailcall6.o"); + test_tailcall_count("tailcall6.bpf.o"); } /* test_tailcall_4 checks that the kernel properly selects indirect jump @@ -329,8 +329,8 @@ static void test_tailcall_4(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, - &prog_fd); + err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); if (CHECK_FAIL(err)) return; @@ -419,8 +419,8 @@ static void test_tailcall_5(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, - &prog_fd); + err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); if (CHECK_FAIL(err)) return; @@ -507,8 +507,8 @@ static void test_tailcall_bpf2bpf_1(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS, - &obj, &prog_fd); + err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -591,8 +591,8 @@ static void test_tailcall_bpf2bpf_2(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS, - &obj, &prog_fd); + err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -671,8 +671,8 @@ static void test_tailcall_bpf2bpf_3(void) .repeat = 1, ); - err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS, - &obj, &prog_fd); + err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -766,8 +766,8 @@ static void test_tailcall_bpf2bpf_4(bool noise) .repeat = 1, ); - err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS, - &obj, &prog_fd); + err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, 
&prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c index 17947c9e1d66..3d34bab01e48 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c @@ -3,7 +3,7 @@ void test_task_fd_query_rawtp(void) { - const char *file = "./test_get_stack_rawtp.o"; + const char *file = "./test_get_stack_rawtp.bpf.o"; __u64 probe_offset, probe_addr; __u32 len, prog_id, fd_type; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c index c2a98a7a8dfc..c717741bf8b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -4,7 +4,7 @@ static void test_task_fd_query_tp_core(const char *probe_name, const char *tp_name) { - const char *file = "./test_tracepoint.o"; + const char *file = "./test_tracepoint.bpf.o"; int err, bytes, efd, prog_fd, pmu_fd; struct perf_event_attr attr = {}; __u64 probe_offset, probe_addr; diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c index 11bf755be4c9..032dbfb26256 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -3,7 +3,7 @@ void test_tcp_estats(void) { - const char *file = "./test_tcp_estats.o"; + const char *file = "./test_tcp_estats.bpf.o"; int err, prog_fd; struct bpf_object *obj; __u32 duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index b90ee47d3111..7295cc60f724 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -65,23 +65,23 @@ struct test_def { void test_test_global_funcs(void) { struct test_def tests[] = { - { "test_global_func1.o", "combined stack size of 4 calls is 544" }, - { "test_global_func2.o" }, - { "test_global_func3.o" , "the call stack of 8 frames" }, - { "test_global_func4.o" }, - { "test_global_func5.o" , "expected pointer to ctx, but got PTR" }, - { "test_global_func6.o" , "modified ctx ptr R2" }, - { "test_global_func7.o" , "foo() doesn't return scalar" }, - { "test_global_func8.o" }, - { "test_global_func9.o" }, - { "test_global_func10.o", "invalid indirect read from stack" }, - { "test_global_func11.o", "Caller passes invalid args into func#1" }, - { "test_global_func12.o", "invalid mem access 'mem_or_null'" }, - { "test_global_func13.o", "Caller passes invalid args into func#1" }, - { "test_global_func14.o", "reference type('FWD S') size cannot be determined" }, - { "test_global_func15.o", "At program exit the register R0 has value" }, - { "test_global_func16.o", "invalid indirect read from stack" }, - { "test_global_func17.o", "Caller passes invalid args into func#1" }, + { "test_global_func1.bpf.o", "combined stack size of 4 calls is 544" }, + { "test_global_func2.bpf.o" }, + { "test_global_func3.bpf.o", "the call stack of 8 frames" }, + { "test_global_func4.bpf.o" }, + { "test_global_func5.bpf.o", "expected pointer to ctx, but got PTR" }, + { "test_global_func6.bpf.o", "modified ctx ptr R2" }, + { "test_global_func7.bpf.o", "foo() doesn't return scalar" }, + { "test_global_func8.bpf.o" }, + { "test_global_func9.bpf.o" }, + { 
"test_global_func10.bpf.o", "invalid indirect read from stack" }, + { "test_global_func11.bpf.o", "Caller passes invalid args into func#1" }, + { "test_global_func12.bpf.o", "invalid mem access 'mem_or_null'" }, + { "test_global_func13.bpf.o", "Caller passes invalid args into func#1" }, + { "test_global_func14.bpf.o", "reference type('FWD S') size cannot be determined" }, + { "test_global_func15.bpf.o", "At program exit the register R0 has value" }, + { "test_global_func16.bpf.o", "invalid indirect read from stack" }, + { "test_global_func17.bpf.o", "Caller passes invalid args into func#1" }, }; libbpf_print_fn_t old_print_fn = NULL; int err, i, duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c index 05acb376f74d..f27013e38d03 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c +++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c @@ -72,7 +72,7 @@ void test_test_overhead(void) if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) return; - obj = bpf_object__open_file("./test_overhead.o", NULL); + obj = bpf_object__open_file("./test_overhead.bpf.o", NULL); if (!ASSERT_OK_PTR(obj, "obj_open_file")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index 39e79291c82b..a479080533db 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -6,7 +6,7 @@ void serial_test_tp_attach_query(void) const int num_progs = 3; int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; __u32 duration = 0, info_len, saved_prog_ids[num_progs]; - const char *file = "./test_tracepoint.o"; + const char *file = "./test_tracepoint.bpf.o"; struct perf_event_query_bpf *query; struct perf_event_attr attr = {}; struct bpf_object *obj[num_progs]; diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index b0acbda6dbf5..564b75bc087f 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -35,7 +35,7 @@ static struct bpf_program *load_prog(char *file, char *name, struct inst *inst) /* TODO: use different target function to run in concurrent mode */ void serial_test_trampoline_count(void) { - char *file = "test_trampoline_count.o"; + char *file = "test_trampoline_count.bpf.o"; char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" }; struct inst inst[MAX_TRAMP_PROGS + 1] = {}; struct bpf_program *prog; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index ec21c53cb1da..947863a1d536 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -8,7 +8,7 @@ void test_xdp(void) struct vip key6 = {.protocol = 6, .family = AF_INET6}; struct iptnl_info value4 = {.family = AF_INET}; struct iptnl_info value6 = {.family = AF_INET6}; - const char *file = "./test_xdp.o"; + const char *file = "./test_xdp.bpf.o"; struct bpf_object *obj; char buf[128]; struct ipv6hdr iph6; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c index 2f033da4cd45..fce203640f8c 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c @@ -4,7 +4,7 @@ 
static void test_xdp_update_frags(void) { - const char *file = "./test_xdp_update_frags.o"; + const char *file = "./test_xdp_update_frags.bpf.o"; int err, prog_fd, max_skb_frags, buf_size, num; struct bpf_program *prog; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 21ceac24e174..9b9cf8458adf 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -4,7 +4,7 @@ static void test_xdp_adjust_tail_shrink(void) { - const char *file = "./test_xdp_adjust_tail_shrink.o"; + const char *file = "./test_xdp_adjust_tail_shrink.bpf.o"; __u32 expect_sz; struct bpf_object *obj; int err, prog_fd; @@ -39,7 +39,7 @@ static void test_xdp_adjust_tail_shrink(void) static void test_xdp_adjust_tail_grow(void) { - const char *file = "./test_xdp_adjust_tail_grow.o"; + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; struct bpf_object *obj; char buf[4096]; /* avoid segfault: large buf to hold grow results */ __u32 expect_sz; @@ -73,7 +73,7 @@ static void test_xdp_adjust_tail_grow(void) static void test_xdp_adjust_tail_grow2(void) { - const char *file = "./test_xdp_adjust_tail_grow.o"; + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; char buf[4096]; /* avoid segfault: large buf to hold grow results */ int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/; struct bpf_object *obj; @@ -135,7 +135,7 @@ static void test_xdp_adjust_tail_grow2(void) static void test_xdp_adjust_frags_tail_shrink(void) { - const char *file = "./test_xdp_adjust_tail_shrink.o"; + const char *file = "./test_xdp_adjust_tail_shrink.bpf.o"; __u32 exp_size; struct bpf_program *prog; struct bpf_object *obj; @@ -202,7 +202,7 @@ out: static void test_xdp_adjust_frags_tail_grow(void) { - const char *file = "./test_xdp_adjust_tail_grow.o"; + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; __u32 exp_size; struct bpf_program *prog; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index 62aa3edda5e6..062fbc8c8e5e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -8,7 +8,7 @@ void serial_test_xdp_attach(void) { __u32 duration = 0, id1, id2, id0 = 0, len; struct bpf_object *obj1, *obj2, *obj3; - const char *file = "./test_xdp.o"; + const char *file = "./test_xdp.bpf.o"; struct bpf_prog_info info = {}; int err, fd1, fd2, fd3; LIBBPF_OPTS(bpf_xdp_attach_opts, opts); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c index 0d01ff6cb91a..cd3aa340e65e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -7,7 +7,7 @@ void serial_test_xdp_info(void) { __u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id; - const char *file = "./xdp_dummy.o"; + const char *file = "./xdp_dummy.bpf.o"; struct bpf_prog_info info = {}; struct bpf_object *obj; int err, prog_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c index f543d1bd21b8..ec5369f247cb 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c @@ -3,7 +3,7 @@ void test_xdp_perf(void) { - const char *file = "./xdp_dummy.o"; + const char *file = 
"./xdp_dummy.bpf.o"; struct bpf_object *obj; char in[128], out[128]; int err, prog_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c index 874a846e298c..75550a40e029 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c @@ -82,7 +82,7 @@ static void test_synproxy(bool xdp) SYS("ethtool -K tmp0 tx off"); if (xdp) /* Workaround required for veth. */ - SYS("ip link set tmp0 xdp object xdp_dummy.o section xdp 2> /dev/null"); + SYS("ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null"); ns = open_netns("synproxy"); if (!ASSERT_OK_PTR(ns, "setns")) diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c index 48cd14b43741..4547b059d487 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -73,10 +73,10 @@ int test_subprog2(struct args_subprog2 *ctx) __builtin_preserve_access_index(&skb->len)); ret = ctx->ret; - /* bpf_prog_test_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 - * which randomizes upper 32 bits after BPF_ALU32 insns. - * Hence after 'w0 <<= 1' upper bits of $rax are random. - * That is expected and correct. Trim them. + /* bpf_prog_test_load() loads "test_pkt_access.bpf.o" with + * BPF_F_TEST_RND_HI32 which randomizes upper 32 bits after BPF_ALU32 + * insns. Hence after 'w0 <<= 1' upper bits of $rax are random. That is + * expected and correct. Trim them. */ ret = (__u32) ret; if (len != 74 || ret != 148) diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 7886265846a0..adeaf63cb6fa 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c @@ -16,7 +16,7 @@ #include "cgroup_helpers.h" #include "testing_helpers.h" -#define DEV_CGROUP_PROG "./dev_cgroup.o" +#define DEV_CGROUP_PROG "./dev_cgroup.bpf.o" #define TEST_CGROUP "/test-bpf-based-device-cgroup/" diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c index 2893e9f2f1e0..4694422aa76c 100644 --- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c +++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c @@ -59,7 +59,7 @@ int main(int argc, char **argv) return 2; } - ret = bpf_prog_test_load("test_lirc_mode2_kern.o", + ret = bpf_prog_test_load("test_lirc_mode2_kern.bpf.o", BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd); if (ret) { printf("Failed to load bpf program\n"); diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index cbebfaa7c1e8..c49f2056e14f 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -651,9 +651,9 @@ static void test_stackmap(unsigned int task, void *data) #include #include #include -#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" -#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" -#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o" +#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.bpf.o" +#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.bpf.o" +#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.bpf.o" static void test_sockmap(unsigned int tasks, void *data) { struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break; @@ -1143,8 +1143,8 @@ out_sockmap: exit(1); } -#define MAPINMAP_PROG "./test_map_in_map.o" -#define 
MAPINMAP_INVALID_PROG "./test_map_in_map_invalid.o" +#define MAPINMAP_PROG "./test_map_in_map.bpf.o" +#define MAPINMAP_INVALID_PROG "./test_map_in_map_invalid.bpf.o" static void test_map_in_map(void) { struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 6cd6ef9fc20b..7fc15e0d24a9 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -782,7 +782,7 @@ if out.find("/sys/kernel/debug type debugfs") == -1: cmd("mount -t debugfs none /sys/kernel/debug") # Check samples are compiled -samples = ["sample_ret0.o", "sample_map_ret0.o"] +samples = ["sample_ret0.bpf.o", "sample_map_ret0.bpf.o"] for s in samples: ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False) skip(ret != 0, "sample %s/%s not found, please compile it" % @@ -803,7 +803,7 @@ cmd("ip netns delete %s" % (ns)) netns = [] try: - obj = bpf_obj("sample_ret0.o") + obj = bpf_obj("sample_ret0.bpf.o") bytecode = bpf_bytecode("1,6 0 0 4294967295,") start_test("Test destruction of generic XDP...") @@ -1023,7 +1023,7 @@ try: sim.wait_for_flush() start_test("Test non-offload XDP attaching to HW...") - bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/nooffload") + bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/nooffload") nooffload = bpf_pinned("/sys/fs/bpf/nooffload") ret, _, err = sim.set_xdp(nooffload, "offload", fail=False, include_stderr=True) @@ -1032,7 +1032,7 @@ try: rm("/sys/fs/bpf/nooffload") start_test("Test offload XDP attaching to drv...") - bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload", + bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/offload", dev=sim['ifname']) offload = bpf_pinned("/sys/fs/bpf/offload") ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True) @@ -1043,7 +1043,7 @@ try: start_test("Test XDP load failure...") sim.dfs["dev/bpf_bind_verifier_accept"] = 0 - ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload", + ret, _, err = bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/offload", dev=sim['ifname'], fail=False, include_stderr=True) fail(ret == 0, "verifier should fail on load") check_verifier_log(err, "[netdevsim] Hello from netdevsim!") @@ -1169,7 +1169,7 @@ try: simdev = NetdevSimDev() sim, = simdev.nsims - map_obj = bpf_obj("sample_map_ret0.o") + map_obj = bpf_obj("sample_map_ret0.bpf.o") start_test("Test loading program with maps...") sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON @@ -1307,10 +1307,10 @@ try: sims = (simA, simB1, simB2, simB3) simB = (simB1, simB2, simB3) - bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA", + bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimA", dev=simA['ifname']) progA = bpf_pinned("/sys/fs/bpf/nsimA") - bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB", + bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB", dev=simB1['ifname']) progB = bpf_pinned("/sys/fs/bpf/nsimB") @@ -1344,14 +1344,14 @@ try: mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0] mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0] - ret, _ = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_", + ret, _ = bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB_", dev=simB3['ifname'], maps=["idx 0 id %d" % (mapB)], fail=False) fail(ret != 0, "couldn't reuse a map on the same ASIC") rm("/sys/fs/bpf/nsimB_") - ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA_", + ret, _, err = 
bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimA_", dev=simA['ifname'], maps=["idx 0 id %d" % (mapB)], fail=False, include_stderr=True) @@ -1359,7 +1359,7 @@ try: fail(err.count("offload device mismatch between prog and map") == 0, "error message missing for cross-ASIC map") - ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_", + ret, _, err = bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB_", dev=simB1['ifname'], maps=["idx 0 id %d" % (mapA)], fail=False, include_stderr=True) diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh index a9bc6f82abc1..515c2eafc97f 100755 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh @@ -54,7 +54,7 @@ DIR=$(dirname $0) TEST_IF="test_cgid_1" TEST_IF_PEER="test_cgid_2" MAX_PING_TRIES=5 -BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o" +BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.bpf.o" BPF_PROG_SECTION="cgroup_id_logger" BPF_PROG_ID=0 PROG="${DIR}/test_skb_cgroup_id_user" diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 458564fcfc82..2c89674fc62c 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -26,14 +26,14 @@ #endif #define CG_PATH "/foo" -#define CONNECT4_PROG_PATH "./connect4_prog.o" -#define CONNECT6_PROG_PATH "./connect6_prog.o" -#define SENDMSG4_PROG_PATH "./sendmsg4_prog.o" -#define SENDMSG6_PROG_PATH "./sendmsg6_prog.o" -#define RECVMSG4_PROG_PATH "./recvmsg4_prog.o" -#define RECVMSG6_PROG_PATH "./recvmsg6_prog.o" -#define BIND4_PROG_PATH "./bind4_prog.o" -#define BIND6_PROG_PATH "./bind6_prog.o" +#define CONNECT4_PROG_PATH "./connect4_prog.bpf.o" +#define CONNECT6_PROG_PATH "./connect6_prog.bpf.o" +#define SENDMSG4_PROG_PATH "./sendmsg4_prog.bpf.o" +#define SENDMSG6_PROG_PATH "./sendmsg6_prog.bpf.o" +#define RECVMSG4_PROG_PATH "./recvmsg4_prog.bpf.o" +#define RECVMSG6_PROG_PATH "./recvmsg6_prog.bpf.o" +#define BIND4_PROG_PATH "./bind4_prog.bpf.o" +#define BIND6_PROG_PATH "./bind6_prog.bpf.o" #define SERV4_IP "192.168.1.254" #define SERV4_REWRITE_IP "127.0.0.1" diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 0fbaccdc8861..dcb038e342d8 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -52,8 +52,8 @@ static void running_handler(int a); #define S1_PORT 10000 #define S2_PORT 10001 -#define BPF_SOCKMAP_FILENAME "test_sockmap_kern.o" -#define BPF_SOCKHASH_FILENAME "test_sockhash_kern.o" +#define BPF_SOCKMAP_FILENAME "test_sockmap_kern.bpf.o" +#define BPF_SOCKHASH_FILENAME "test_sockhash_kern.bpf.o" #define CG_PATH "/sockmap" /* global sockets */ diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index 57620e7c9048..bcdbd27f22f0 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -1372,7 +1372,7 @@ static struct sysctl_test tests[] = { }, { "C prog: deny all writes", - .prog_file = "./test_sysctl_prog.o", + .prog_file = "./test_sysctl_prog.bpf.o", .attach_type = BPF_CGROUP_SYSCTL, .sysctl = "net/ipv4/tcp_mem", .open_flags = O_WRONLY, @@ -1381,7 +1381,7 @@ static struct sysctl_test tests[] = { }, { "C prog: deny access by name", - .prog_file = "./test_sysctl_prog.o", + .prog_file = "./test_sysctl_prog.bpf.o", .attach_type = BPF_CGROUP_SYSCTL, 
.sysctl = "net/ipv4/route/mtu_expires", .open_flags = O_RDONLY, @@ -1389,7 +1389,7 @@ static struct sysctl_test tests[] = { }, { "C prog: read tcp_mem", - .prog_file = "./test_sysctl_prog.o", + .prog_file = "./test_sysctl_prog.bpf.o", .attach_type = BPF_CGROUP_SYSCTL, .sysctl = "net/ipv4/tcp_mem", .open_flags = O_RDONLY, diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh index 102e6588e2fe..b42c24282c25 100755 --- a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh +++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh @@ -76,7 +76,7 @@ main() DIR=$(dirname $0) TEST_IF=lo MAX_PING_TRIES=5 -BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o" +BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.bpf.o" CLSACT_SECTION="tc" XDP_SECTION="xdp" BPF_PROG_ID=0 diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index 8284db8b0f13..595194453ff8 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c @@ -69,7 +69,7 @@ int verify_result(const struct tcpnotify_globals *result) int main(int argc, char **argv) { - const char *file = "test_tcpnotify_kern.o"; + const char *file = "test_tcpnotify_kern.bpf.o"; struct bpf_map *perf_map, *global_map; struct tcpnotify_globals g = {0}; struct perf_buffer *pb = NULL; diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh index 1d79f31480ad..0746a4fde9d3 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh @@ -54,10 +54,10 @@ test_xdp_redirect() return 0 fi - ip -n ${NS1} link set veth11 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null - ip -n ${NS2} link set veth22 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null - ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null - ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null + ip -n ${NS1} link set veth11 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null + ip -n ${NS2} link set veth22 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null + ip link set dev veth1 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_222 &> /dev/null + ip link set dev veth2 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_111 &> /dev/null if ip netns exec ${NS1} ping -c 1 10.1.1.22 &> /dev/null && ip netns exec ${NS2} ping -c 1 10.1.1.11 &> /dev/null; then diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh index cc57cb87e65f..4c3c3fdd2d73 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh @@ -94,7 +94,7 @@ setup_ns() # Add a neigh entry for IPv4 ping test ip -n ${NS[$i]} neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0 ip -n ${NS[$i]} link set veth0 $mode obj \ - xdp_dummy.o sec xdp &> /dev/null || \ + xdp_dummy.bpf.o sec xdp &> /dev/null || \ { test_fail "Unable to load dummy xdp" && exit 1; } IFACES="$IFACES veth$i" veth_mac[$i]=$(ip -n ${NS[0]} link show veth$i | awk '/link\/ether/ {print $2}') diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh index 49936c4c8567..5211ca9a0239 100755 --- a/tools/testing/selftests/bpf/test_xdp_veth.sh +++ b/tools/testing/selftests/bpf/test_xdp_veth.sh @@ -101,7 +101,7 @@ ip -n ${NS3} link set dev 
veth33 up mkdir $BPF_DIR bpftool prog loadall \ - xdp_redirect_map.o $BPF_DIR/progs type xdp \ + xdp_redirect_map.bpf.o $BPF_DIR/progs type xdp \ pinmaps $BPF_DIR/maps bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0 bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0 @@ -110,9 +110,9 @@ ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0 ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1 ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2 -ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.o sec xdp -ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.o sec xdp -ip -n ${NS3} link set dev veth33 xdp obj xdp_dummy.o sec xdp +ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.bpf.o sec xdp +ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.bpf.o sec xdp +ip -n ${NS3} link set dev veth33 xdp obj xdp_dummy.bpf.o sec xdp trap cleanup EXIT diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c index c03b3a75991f..c1fc44c87c30 100644 --- a/tools/testing/selftests/bpf/xdp_redirect_multi.c +++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c @@ -142,7 +142,7 @@ int main(int argc, char **argv) } printf("\n"); - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s_kern.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); err = libbpf_get_error(obj); if (err) diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c index d874ddfb39c4..ff35320d2be9 100644 --- a/tools/testing/selftests/bpf/xdp_synproxy.c +++ b/tools/testing/selftests/bpf/xdp_synproxy.c @@ -193,7 +193,7 @@ static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc) int prog_fd; int err; - snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv0); + snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.bpf.o", argv0); obj = bpf_object__open_file(xdp_filename, NULL); err = libbpf_get_error(obj); if (err < 0) { diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c index 5b6f977870f8..1503a1d2faa0 100644 --- a/tools/testing/selftests/bpf/xdping.c +++ b/tools/testing/selftests/bpf/xdping.c @@ -168,7 +168,7 @@ int main(int argc, char **argv) /* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s_kern.bpf.o", argv[0]); if (bpf_prog_test_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) { fprintf(stderr, "load of %s failed\n", filename); -- cgit v1.2.3 From af515a5587b8f45f19e11657746e0c89411b0380 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 1 Sep 2022 13:26:45 -0700 Subject: selftests/xsk: Avoid use-after-free on ctx The put lowers the reference count to 0 and frees ctx; reading it afterwards is invalid. Move the put after the uses and determine the last use by the reference count being 1.
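In other words, this is the classic refcount ordering mistake: the final put may free the object, so any cleanup that dereferences it has to happen first. A minimal user-space sketch of the fixed ordering, with hypothetical names (my_ctx, ctx_put, socket_delete) rather than the actual xsk.c code:

#include <stdlib.h>
#include <unistd.h>

struct my_ctx {
	int refcount;	/* number of sockets sharing this ctx */
	int prog_fd;
};

static void ctx_put(struct my_ctx *ctx)
{
	if (--ctx->refcount == 0)
		free(ctx);	/* ctx must not be touched after this */
}

static void socket_delete(struct my_ctx *ctx)
{
	/* The buggy order was: ctx_put(ctx); if (!ctx->refcount) ...
	 * which reads freed memory once the put drops the count to 0.
	 */
	if (ctx->refcount == 1) {
		/* last user: release resources while ctx is still valid */
		close(ctx->prog_fd);
	}
	ctx_put(ctx);
}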
Fixes: 39e940d4abfa ("selftests/xsk: Destroy BPF resources only when ctx refcount drops to 0") Signed-off-by: Ian Rogers Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20220901202645.1463552-1-irogers@google.com --- tools/testing/selftests/bpf/xsk.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c index f2721a4ae7c5..0b3ff49c740d 100644 --- a/tools/testing/selftests/bpf/xsk.c +++ b/tools/testing/selftests/bpf/xsk.c @@ -1237,15 +1237,15 @@ void xsk_socket__delete(struct xsk_socket *xsk) ctx = xsk->ctx; umem = ctx->umem; - xsk_put_ctx(ctx, true); - - if (!ctx->refcount) { + if (ctx->refcount == 1) { xsk_delete_bpf_maps(xsk); close(ctx->prog_fd); if (ctx->has_bpf_link) close(ctx->link_fd); } + xsk_put_ctx(ctx, true); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { -- cgit v1.2.3 From f649f992deeeab020257b886e054cc407154cbfc Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Thu, 1 Sep 2022 17:29:37 -0700 Subject: selftest/bpf: Add test for bpf_getsockopt() This patch removes __bpf_getsockopt(), which directly reads the sk by using PTR_TO_BTF_ID. Instead, the test now directly uses the kernel bpf helper bpf_getsockopt(), which now supports all the required optnames. TCP_SAVE[D]_SYN and TCP_MAXSEG are not tested in a loop for all the hooks and sock_ops callbacks. TCP_SAVE[D]_SYN only works on a passive connection. TCP_MAXSEG can only be set before the connection is established, and its getsockopt return value can only be checked after the connection is established. Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20220902002937.2896904-1-kafai@fb.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/progs/bpf_tracing_net.h | 1 + tools/testing/selftests/bpf/progs/setget_sockopt.c | 148 ++++++--------------- 2 files changed, 43 insertions(+), 106 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index 5ebc6dabef84..adb087aecc9e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -38,6 +38,7 @@ #define TCP_USER_TIMEOUT 18 #define TCP_NOTSENT_LOWAT 25 #define TCP_SAVE_SYN 27 +#define TCP_SAVED_SYN 28 #define TCP_CA_NAME_MAX 16 #define TCP_NAGLE_OFF 1 diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 79debf3c2f44..9523333b8905 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -52,7 +52,6 @@ static const struct sockopt_test sol_socket_tests[] = { static const struct sockopt_test sol_tcp_tests[] = { { .opt = TCP_NODELAY, .flip = 1, }, - { .opt = TCP_MAXSEG, .new = 1314, .expected = 1314, }, { .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, }, { .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, }, { .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, }, @@ -62,7 +61,6 @@ static const struct sockopt_test sol_tcp_tests[] = { { .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, }, { .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, }, { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, }, - { .opt = TCP_SAVE_SYN, .new = 1, .expected = 1, }, { .opt = 0, }, }; @@ -82,102 +80,6 @@ struct loop_ctx { struct sock *sk;
}; -static int __bpf_getsockopt(void *ctx, struct sock *sk, - int level, int opt, int *optval, - int optlen) -{ - if (level == SOL_SOCKET) { - switch (opt) { - case SO_REUSEADDR: - *optval = !!BPF_CORE_READ_BITFIELD(sk, sk_reuse); - break; - case SO_KEEPALIVE: - *optval = !!(sk->sk_flags & (1UL << 3)); - break; - case SO_RCVLOWAT: - *optval = sk->sk_rcvlowat; - break; - case SO_MAX_PACING_RATE: - *optval = sk->sk_max_pacing_rate; - break; - default: - return bpf_getsockopt(ctx, level, opt, optval, optlen); - } - return 0; - } - - if (level == IPPROTO_TCP) { - struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk); - - if (!tp) - return -1; - - switch (opt) { - case TCP_NODELAY: - *optval = !!(BPF_CORE_READ_BITFIELD(tp, nonagle) & TCP_NAGLE_OFF); - break; - case TCP_MAXSEG: - *optval = tp->rx_opt.user_mss; - break; - case TCP_KEEPIDLE: - *optval = tp->keepalive_time / CONFIG_HZ; - break; - case TCP_SYNCNT: - *optval = tp->inet_conn.icsk_syn_retries; - break; - case TCP_KEEPINTVL: - *optval = tp->keepalive_intvl / CONFIG_HZ; - break; - case TCP_KEEPCNT: - *optval = tp->keepalive_probes; - break; - case TCP_WINDOW_CLAMP: - *optval = tp->window_clamp; - break; - case TCP_THIN_LINEAR_TIMEOUTS: - *optval = !!BPF_CORE_READ_BITFIELD(tp, thin_lto); - break; - case TCP_USER_TIMEOUT: - *optval = tp->inet_conn.icsk_user_timeout; - break; - case TCP_NOTSENT_LOWAT: - *optval = tp->notsent_lowat; - break; - case TCP_SAVE_SYN: - *optval = BPF_CORE_READ_BITFIELD(tp, save_syn); - break; - default: - return bpf_getsockopt(ctx, level, opt, optval, optlen); - } - return 0; - } - - if (level == IPPROTO_IPV6) { - switch (opt) { - case IPV6_AUTOFLOWLABEL: { - __u16 proto = sk->sk_protocol; - struct inet_sock *inet_sk; - - if (proto == IPPROTO_TCP) - inet_sk = (struct inet_sock *)bpf_skc_to_tcp_sock(sk); - else - inet_sk = (struct inet_sock *)bpf_skc_to_udp6_sock(sk); - - if (!inet_sk) - return -1; - - *optval = !!inet_sk->pinet6->autoflowlabel; - break; - } - default: - return bpf_getsockopt(ctx, level, opt, optval, optlen); - } - return 0; - } - - return bpf_getsockopt(ctx, level, opt, optval, optlen); -} - static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, const struct sockopt_test *t, int level) @@ -186,7 +88,7 @@ static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, opt = t->opt; - if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old))) + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old))) return 1; /* kernel initialized txrehash to 255 */ if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1) @@ -195,7 +97,7 @@ static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, new = !old; if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) return 1; - if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) || tmp != new) return 1; @@ -218,13 +120,13 @@ static int bpf_test_sockopt_int(void *ctx, struct sock *sk, else expected = t->expected; - if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old)) || + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)) || old == new) return 1; if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) return 1; - if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) || tmp != expected) return 1; @@ -410,6 +312,34 @@ static int binddev_test(void *ctx) return 0; } +static int test_tcp_maxseg(void *ctx, struct sock *sk) +{ + int val = 1314, tmp; + + if (sk->sk_state != TCP_ESTABLISHED) + return 
bpf_setsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, + &val, sizeof(val)); + + if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, &tmp, sizeof(tmp)) || + tmp > val) + return -1; + + return 0; +} + +static int test_tcp_saved_syn(void *ctx, struct sock *sk) +{ + __u8 saved_syn[20]; + int one = 1; + + if (sk->sk_state == TCP_LISTEN) + return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_SAVE_SYN, + &one, sizeof(one)); + + return bpf_getsockopt(ctx, IPPROTO_TCP, TCP_SAVED_SYN, + saved_syn, sizeof(saved_syn)); +} + SEC("lsm_cgroup/socket_post_create") int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, int protocol, int kern) @@ -440,16 +370,22 @@ int skops_sockopt(struct bpf_sock_ops *skops) switch (skops->op) { case BPF_SOCK_OPS_TCP_LISTEN_CB: - nr_listen += !bpf_test_sockopt(skops, sk); + nr_listen += !(bpf_test_sockopt(skops, sk) || + test_tcp_maxseg(skops, sk) || + test_tcp_saved_syn(skops, sk)); break; case BPF_SOCK_OPS_TCP_CONNECT_CB: - nr_connect += !bpf_test_sockopt(skops, sk); + nr_connect += !(bpf_test_sockopt(skops, sk) || + test_tcp_maxseg(skops, sk)); break; case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: - nr_active += !bpf_test_sockopt(skops, sk); + nr_active += !(bpf_test_sockopt(skops, sk) || + test_tcp_maxseg(skops, sk)); break; case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: - nr_passive += !bpf_test_sockopt(skops, sk); + nr_passive += !(bpf_test_sockopt(skops, sk) || + test_tcp_maxseg(skops, sk) || + test_tcp_saved_syn(skops, sk)); break; } -- cgit v1.2.3 From 37521bffdd2d1efcb1dbdfd3ee89584c8943421c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 2 Sep 2022 14:10:45 -0700 Subject: selftests/bpf: Improve test coverage of test_maps Make test_maps more stressful with more parallelism in update/delete/lookup/walk including different value sizes. 
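The walk being stressed is the standard user-space iteration over bpf_map_get_next_key(); a minimal sketch of that pattern (walk_map is a hypothetical helper, the libbpf calls are real):

#include <assert.h>
#include <bpf/bpf.h>

/* Visit every key of a BPF hash map from user space. Passing NULL as
 * the current key yields the first key; iteration stops when
 * bpf_map_get_next_key() fails (ENOENT after the last key).
 */
static void walk_map(int fd)
{
	long long key, next_key, value[3];	/* 3 matches the test's VALUE_SIZE */
	int n = 0;

	while (bpf_map_get_next_key(fd, n ? &key : NULL, &next_key) == 0) {
		key = next_key;
		assert(bpf_map_lookup_elem(fd, &key, value) == 0);
		n++;
	}
}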
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Kumar Kartikeya Dwivedi Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220902211058.60789-4-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/test_maps.c | 38 +++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 14 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index c49f2056e14f..00b9cc305e58 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -264,10 +264,11 @@ static void test_hashmap_percpu(unsigned int task, void *data) close(fd); } +#define VALUE_SIZE 3 static int helper_fill_hashmap(int max_entries) { int i, fd, ret; - long long key, value; + long long key, value[VALUE_SIZE] = {}; fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), max_entries, &map_opts); @@ -276,8 +277,8 @@ static int helper_fill_hashmap(int max_entries) "err: %s, flags: 0x%x\n", strerror(errno), map_opts.map_flags); for (i = 0; i < max_entries; i++) { - key = i; value = key; - ret = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + key = i; value[0] = key; + ret = bpf_map_update_elem(fd, &key, value, BPF_NOEXIST); CHECK(ret != 0, "can't update hashmap", "err: %s\n", strerror(ret)); @@ -288,8 +289,8 @@ static int helper_fill_hashmap(int max_entries) static void test_hashmap_walk(unsigned int task, void *data) { - int fd, i, max_entries = 1000; - long long key, value, next_key; + int fd, i, max_entries = 10000; + long long key, value[VALUE_SIZE], next_key; bool next_key_valid = true; fd = helper_fill_hashmap(max_entries); @@ -297,7 +298,7 @@ static void test_hashmap_walk(unsigned int task, void *data) for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key, &next_key) == 0; i++) { key = next_key; - assert(bpf_map_lookup_elem(fd, &key, &value) == 0); + assert(bpf_map_lookup_elem(fd, &key, value) == 0); } assert(i == max_entries); @@ -305,9 +306,9 @@ static void test_hashmap_walk(unsigned int task, void *data) assert(bpf_map_get_next_key(fd, NULL, &key) == 0); for (i = 0; next_key_valid; i++) { next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0; - assert(bpf_map_lookup_elem(fd, &key, &value) == 0); - value++; - assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); + assert(bpf_map_lookup_elem(fd, &key, value) == 0); + value[0]++; + assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0); key = next_key; } @@ -316,8 +317,8 @@ static void test_hashmap_walk(unsigned int task, void *data) for (i = 0; bpf_map_get_next_key(fd, !i ? 
NULL : &key, &next_key) == 0; i++) { key = next_key; - assert(bpf_map_lookup_elem(fd, &key, &value) == 0); - assert(value - 1 == key); + assert(bpf_map_lookup_elem(fd, &key, value) == 0); + assert(value[0] - 1 == key); } assert(i == max_entries); @@ -1371,16 +1372,16 @@ static void __run_parallel(unsigned int tasks, static void test_map_stress(void) { + run_parallel(100, test_hashmap_walk, NULL); run_parallel(100, test_hashmap, NULL); run_parallel(100, test_hashmap_percpu, NULL); run_parallel(100, test_hashmap_sizes, NULL); - run_parallel(100, test_hashmap_walk, NULL); run_parallel(100, test_arraymap, NULL); run_parallel(100, test_arraymap_percpu, NULL); } -#define TASKS 1024 +#define TASKS 100 #define DO_UPDATE 1 #define DO_DELETE 0 @@ -1432,6 +1433,8 @@ static void test_update_delete(unsigned int fn, void *data) int fd = ((int *)data)[0]; int i, key, value, err; + if (fn & 1) + test_hashmap_walk(fn, NULL); for (i = fn; i < MAP_SIZE; i += TASKS) { key = value = i; @@ -1455,7 +1458,7 @@ static void test_update_delete(unsigned int fn, void *data) static void test_map_parallel(void) { - int i, fd, key = 0, value = 0; + int i, fd, key = 0, value = 0, j = 0; int data[2]; fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), @@ -1466,6 +1469,7 @@ static void test_map_parallel(void) exit(1); } +again: /* Use the same fd in children to add elements to this map: * child_0 adds key=0, key=1024, key=2048, ... * child_1 adds key=1, key=1025, key=2049, ... @@ -1502,6 +1506,12 @@ static void test_map_parallel(void) key = -1; assert(bpf_map_get_next_key(fd, NULL, &key) < 0 && errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT); + + key = 0; + bpf_map_delete_elem(fd, &key); + if (j++ < 5) + goto again; + close(fd); } static void test_map_rdonly(void) -- cgit v1.2.3 From 0fd7c5d43339b783ee3301a05f925d1e52ac87c9 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 2 Sep 2022 14:10:49 -0700 Subject: bpf: Optimize call_rcu in non-preallocated hash map. Doing call_rcu() a million times a second becomes a bottleneck. Convert the non-preallocated hash map from call_rcu to SLAB_TYPESAFE_BY_RCU. The RCU critical section is no longer observed for one htab element, which makes the non-preallocated hash map behave just like the preallocated one. The map elements are released back to kernel memory after an RCU critical section is observed. This improves 'map_perf_test 4' performance from 100k events per second to 250k events per second. bpf_mem_alloc + percpu_counter + typesafe_by_rcu provide a 10x performance boost to the non-preallocated hash map and bring it within a few percent of the preallocated map while consuming a fraction of the memory. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Kumar Kartikeya Dwivedi Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220902211058.60789-8-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/progs/timer.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c index 5f5309791649..0053c5402173 100644 --- a/tools/testing/selftests/bpf/progs/timer.c +++ b/tools/testing/selftests/bpf/progs/timer.c @@ -208,17 +208,6 @@ static int timer_cb2(void *map, int *key, struct hmap_elem *val) */ bpf_map_delete_elem(map, key); - /* in non-preallocated hashmap both 'key' and 'val' are RCU - * protected and still valid though this element was deleted - * from the map. Arm this timer for ~35 seconds.
When callback - * finishes the call_rcu will invoke: - * htab_elem_free_rcu - * check_and_free_timer - * bpf_timer_cancel_and_free - * to cancel this 35 second sleep and delete the timer for real. - */ - if (bpf_timer_start(&val->timer, 1ull << 35, 0) != 0) - err |= 256; ok |= 4; } return 0; -- cgit v1.2.3
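For context on the kernel side of that conversion: with SLAB_TYPESAFE_BY_RCU, the slab cache may hand a freed element's memory to a new element before an RCU grace period elapses. The memory stays type-stable, but its identity can change, which is why the selftest above can no longer re-arm a timer through 'val' after the delete. A hedged sketch of creating such a cache (illustrative only, not the actual kernel/bpf/hashtab.c code):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct htab_elem_sketch {
	u32 hash;
	char key_and_value[];
};

static struct kmem_cache *elem_cache;

static int __init elem_cache_init(void)
{
	/* Freed objects may be recycled immediately without call_rcu();
	 * RCU readers can still dereference them safely, but must
	 * revalidate identity (e.g. recheck the key) before trusting
	 * the contents.
	 */
	elem_cache = kmem_cache_create("htab_elem_sketch",
				       sizeof(struct htab_elem_sketch),
				       0, SLAB_TYPESAFE_BY_RCU, NULL);
	return elem_cache ? 0 : -ENOMEM;
}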