author    Jakub Sitnicki <jakub@cloudflare.com>      2025-11-05 21:19:47 +0100
committer Martin KaFai Lau <martin.lau@kernel.org>   2025-11-10 10:52:32 -0800
commit    967534e57c4439ba43b31f4af4cb85e84c86e6b7 (patch)
tree      ac908a28e5f7b52b10c6dabbbc2cbcd2033de086 /tools/testing/selftests/bpf/prog_tests
parent    fb206fc3129bc9d4749905d4870ba05dc89126d2 (diff)
selftests/bpf: Verify skb metadata in BPF instead of userspace
Move metadata verification into the BPF TC programs. Previously, userspace
read metadata from a map and verified it once at test end. Now TC programs
compare metadata directly using __builtin_memcmp() and set a test_pass flag.
This enables verification at multiple points during test execution rather
than a single final check.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20251105-skb-meta-rx-path-v4-10-5ceb08a9b37b@cloudflare.com
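For context, the pattern the commit describes is: the TC program compares the
skb metadata area against the expected payload in BPF itself and flips a
global flag, which userspace later reads through the skeleton's BSS. The
sketch below illustrates that pattern only; it is not the patch's code, and
the names ing_cls_check_meta, expected_meta and META_LEN (and the fixed
length) are hypothetical.

/* Illustrative sketch, not taken from the patch: a TC ingress program that
 * verifies skb metadata in BPF and records the result in a global flag.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>

#define META_LEN 32			/* hypothetical metadata length */

__u8 expected_meta[META_LEN];		/* expected bytes, set by userspace */
bool test_pass;				/* read back via skel->bss->test_pass */

SEC("tc")
int ing_cls_check_meta(struct __sk_buff *skb)
{
	__u8 *data_meta = (__u8 *)(long)skb->data_meta;
	__u8 *data = (__u8 *)(long)skb->data;

	/* The verifier requires a bounds check before the metadata area
	 * may be dereferenced.
	 */
	if (data_meta + META_LEN > data)
		return TC_ACT_SHOT;

	/* Compare the metadata against the expected payload in the BPF
	 * program and record the verdict in a global flag.
	 */
	if (__builtin_memcmp(data_meta, expected_meta, META_LEN) == 0)
		test_pass = true;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

Userspace then only needs to clear the flag before sending traffic and assert
it afterwards, which is exactly what the hunks below switch to
(skel->bss->test_pass = false; ... ASSERT_TRUE(skel->bss->test_pass, ...)).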
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c | 52
1 file changed, 13 insertions(+), 39 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index 178292d1251a..93a1fbe6a4fd 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -171,33 +171,6 @@ static int write_test_packet(int tap_fd)
return 0;
}
-static void assert_test_result(const struct bpf_map *result_map)
-{
- int err;
- __u32 map_key = 0;
- __u8 map_value[TEST_PAYLOAD_LEN];
-
- err = bpf_map__lookup_elem(result_map, &map_key, sizeof(map_key),
- &map_value, TEST_PAYLOAD_LEN, BPF_ANY);
- if (!ASSERT_OK(err, "lookup test_result"))
- return;
-
- ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN,
- "test_result map contains test payload");
-}
-
-static bool clear_test_result(struct bpf_map *result_map)
-{
- const __u8 v[sizeof(test_payload)] = {};
- const __u32 k = 0;
- int err;
-
- err = bpf_map__update_elem(result_map, &k, sizeof(k), v, sizeof(v), BPF_ANY);
- ASSERT_OK(err, "update test_result");
-
- return err == 0;
-}
-
void test_xdp_context_veth(void)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
@@ -270,11 +243,13 @@ void test_xdp_context_veth(void)
if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
goto close;
+ skel->bss->test_pass = false;
+
ret = send_test_packet(tx_ifindex);
if (!ASSERT_OK(ret, "send_test_packet"))
goto close;
- assert_test_result(skel->maps.test_result);
+ ASSERT_TRUE(skel->bss->test_pass, "test_pass");
close:
close_netns(nstoken);
@@ -286,7 +261,7 @@ close:
static void test_tuntap(struct bpf_program *xdp_prog,
struct bpf_program *tc_prio_1_prog,
struct bpf_program *tc_prio_2_prog,
- struct bpf_map *result_map)
+ bool *test_pass)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
@@ -295,8 +270,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
int tap_ifindex;
int ret;
- if (!clear_test_result(result_map))
- return;
+ *test_pass = false;
ns = netns_new(TAP_NETNS, true);
if (!ASSERT_OK_PTR(ns, "create and open ns"))
@@ -340,7 +314,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
if (!ASSERT_OK(ret, "write_test_packet"))
goto close;
- assert_test_result(result_map);
+ ASSERT_TRUE(*test_pass, "test_pass");
close:
if (tap_fd >= 0)
@@ -431,37 +405,37 @@ void test_xdp_context_tuntap(void)
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls,
NULL, /* tc prio 2 */
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_read"))
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls_dynptr_read,
NULL, /* tc prio 2 */
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_slice"))
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls_dynptr_slice,
NULL, /* tc prio 2 */
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_write"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_write,
skel->progs.ing_cls_dynptr_read,
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_slice_rdwr"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_slice_rdwr,
skel->progs.ing_cls_dynptr_slice,
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_offset"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_offset_wr,
skel->progs.ing_cls_dynptr_offset_rd,
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("dynptr_offset_oob"))
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls_dynptr_offset_oob,
skel->progs.ing_cls,
- skel->maps.test_result);
+ &skel->bss->test_pass);
if (test__start_subtest("clone_data_meta_empty_on_data_write"))
test_tuntap_mirred(skel->progs.ing_xdp,
skel->progs.clone_data_meta_empty_on_data_write,