summaryrefslogtreecommitdiff
path: root/tools/testing/selftests/bpf
diff options
context:
space:
mode:
authorJiayuan Chen <jiayuan.chen@linux.dev>2026-04-07 10:26:28 +0800
committerJakub Kicinski <kuba@kernel.org>2026-04-12 12:28:05 -0700
commit04013c3ca022734ec2897b28a96d4cbd8a930407 (patch)
tree90015fcae0717b9697b7782fdf148427056729be /tools/testing/selftests/bpf
parent10f86a2a5c91fc4c4d001960f1c21abe52545ef6 (diff)
selftests/bpf: Add tests for sock_ops ctx access with same src/dst register
Add selftests to verify SOCK_OPS_GET_SK() and SOCK_OPS_GET_FIELD() correctly return NULL/zero when dst_reg == src_reg and is_fullsock == 0. Three subtests are included: - get_sk: ctx->sk with same src/dst register (SOCK_OPS_GET_SK) - get_field: ctx->snd_cwnd with same src/dst register (SOCK_OPS_GET_FIELD) - get_sk_diff_reg: ctx->sk with different src/dst register (baseline) Each BPF program uses inline asm (__naked) to force specific register allocation, reads is_fullsock first, then loads the field using the same (or different) register. The test triggers TCP_NEW_SYN_RECV via a TCP handshake and checks that the result is NULL/zero when is_fullsock == 0. Reviewed-by: Sun Jian <sun.jian.kdev@gmail.com> Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev> Acked-by: Martin KaFai Lau <martin.lau@kernel.org> Link: https://patch.msgid.link/20260407022720.162151-3-jiayuan.chen@linux.dev Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c76
-rw-r--r--tools/testing/selftests/bpf/progs/sock_ops_get_sk.c117
2 files changed, 193 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
new file mode 100644
index 000000000000..343d92c4df30
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "sock_ops_get_sk.skel.h"
+
+/* See progs/sock_ops_get_sk.c for the bug description.
+ *
+ * Attach @prog_fd to @cgroup_fd as a BPF_CGROUP_SOCK_OPS program, then
+ * drive a loopback TCP handshake so the program actually runs.  The
+ * program is always detached before returning.
+ */
+static void run_sock_ops_test(int cgroup_fd, int prog_fd)
+{
+	int server_fd;
+	int client_fd;
+
+	if (!ASSERT_OK(bpf_prog_attach(prog_fd, cgroup_fd,
+				       BPF_CGROUP_SOCK_OPS, 0), "prog_attach"))
+		return;
+
+	server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+	if (!ASSERT_OK_FD(server_fd, "start_server"))
+		goto out_detach;
+
+	/* A plain connect() suffices: completing the handshake takes the
+	 * listener through TCP_NEW_SYN_RECV, where is_fullsock == 0 and
+	 * is_locked_tcp_sock == 0 when the sock_ops program fires.
+	 */
+	client_fd = connect_to_fd(server_fd, 0);
+	if (ASSERT_OK_FD(client_fd, "connect_to_fd"))
+		close(client_fd);
+
+	close(server_fd);
+out_detach:
+	bpf_prog_detach(cgroup_fd, BPF_CGROUP_SOCK_OPS);
+}
+
+/* Top-level test entry (run by test_progs; the ns_ prefix presumably has
+ * the harness run it in its own network namespace -- TODO confirm).
+ *
+ * Loads one skeleton shared by all three subtests and, after each
+ * handshake, checks that the BPF program saw NULL/zero on the
+ * !fullsock path (*_null_seen == 1) and never observed a stale
+ * pointer/value (*_bug_detected == 0).
+ */
+void test_ns_sock_ops_get_sk(void)
+{
+ struct sock_ops_get_sk *skel;
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/sock_ops_get_sk");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup"))
+ return;
+
+ /* The result flags live in the skeleton's .bss, so they start zeroed. */
+ skel = sock_ops_get_sk__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load"))
+ goto close_cgroup;
+
+ /* Test SOCK_OPS_GET_SK with same src/dst register */
+ if (test__start_subtest("get_sk")) {
+ run_sock_ops_test(cgroup_fd,
+ bpf_program__fd(skel->progs.sock_ops_get_sk_same_reg));
+ ASSERT_EQ(skel->bss->null_seen, 1, "null_seen");
+ ASSERT_EQ(skel->bss->bug_detected, 0, "bug_not_detected");
+ }
+
+ /* Test SOCK_OPS_GET_FIELD with same src/dst register */
+ if (test__start_subtest("get_field")) {
+ run_sock_ops_test(cgroup_fd,
+ bpf_program__fd(skel->progs.sock_ops_get_field_same_reg));
+ ASSERT_EQ(skel->bss->field_null_seen, 1, "field_null_seen");
+ ASSERT_EQ(skel->bss->field_bug_detected, 0, "field_bug_not_detected");
+ }
+
+ /* Test SOCK_OPS_GET_SK with different src/dst register (baseline:
+  * this path was never broken, so it should behave the same way).
+  */
+ if (test__start_subtest("get_sk_diff_reg")) {
+ run_sock_ops_test(cgroup_fd,
+ bpf_program__fd(skel->progs.sock_ops_get_sk_diff_reg));
+ ASSERT_EQ(skel->bss->diff_reg_null_seen, 1, "diff_reg_null_seen");
+ ASSERT_EQ(skel->bss->diff_reg_bug_detected, 0, "diff_reg_bug_not_detected");
+ }
+
+ sock_ops_get_sk__destroy(skel);
+close_cgroup:
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c b/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
new file mode 100644
index 000000000000..3a0689f8ce7c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/*
+ * Test the SOCK_OPS_GET_SK() and SOCK_OPS_GET_FIELD() macros in
+ * sock_ops_convert_ctx_access() when dst_reg == src_reg.
+ *
+ * When dst_reg == src_reg, the macros borrow a temporary register to load
+ * is_fullsock / is_locked_tcp_sock, because dst_reg holds the ctx pointer
+ * and cannot be clobbered before ctx->sk / ctx->field is read. If
+ * is_fullsock == 0 (e.g., TCP_NEW_SYN_RECV with a request_sock), the macro
+ * must still zero dst_reg so the verifier's PTR_TO_SOCKET_OR_NULL /
+ * SCALAR_VALUE type is correct at runtime. A missing clear leaves a stale
+ * ctx pointer in dst_reg that passes NULL checks (GET_SK) or leaks a kernel
+ * address as a scalar (GET_FIELD).
+ *
+ * When dst_reg != src_reg, dst_reg itself is used to load is_fullsock, so
+ * the JEQ (dst_reg == 0) naturally leaves it zeroed on the !fullsock path.
+ */
+
+/* Written by the program below, read back by the userspace test:
+ * null_seen == 1   -> ctx->sk loaded as NULL while is_fullsock == 0 (pass)
+ * bug_detected == 1 -> ctx->sk non-NULL while is_fullsock == 0 (the bug)
+ */
+int bug_detected;
+int null_seen;
+
+/* SOCK_OPS_GET_SK: same-register case -- both the is_fullsock check's
+ * input and the sk load use r1, forcing the dst_reg == src_reg rewrite
+ * path in sock_ops_convert_ctx_access().
+ */
+SEC("sockops")
+__naked void sock_ops_get_sk_same_reg(void)
+{
+ asm volatile (
+ /* Save ctx->is_fullsock in r7 while r1 still holds ctx. */
+ "r7 = *(u32 *)(r1 + %[is_fullsock_off]);"
+ /* Load ctx->sk with dst_reg == src_reg == r1: the path under test. */
+ "r1 = *(u64 *)(r1 + %[sk_off]);"
+ /* Full socket: nothing to verify, just exit. */
+ "if r7 != 0 goto 2f;"
+ /* Not a full socket: the converted load must have yielded NULL. */
+ "if r1 == 0 goto 1f;"
+ /* Non-NULL here means a stale ctx pointer leaked into r1. */
+ "r1 = %[bug_detected] ll;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0) = r2;"
+ "goto 2f;"
+ "1:"
+ "r1 = %[null_seen] ll;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0) = r2;"
+ "2:"
+ /* Program return value. */
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
+ __imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)),
+ __imm_addr(bug_detected),
+ __imm_addr(null_seen)
+ : __clobber_all);
+}
+
+/* SOCK_OPS_GET_FIELD: same-register, is_locked_tcp_sock == 0 path.
+ * Same shape as the GET_SK test, but for a scalar field (snd_cwnd):
+ * on the !fullsock path the converted load must read 0, not a leaked
+ * kernel value (see the file-header comment).
+ * field_null_seen == 1    -> snd_cwnd read as 0 (pass)
+ * field_bug_detected == 1 -> snd_cwnd read non-zero (the bug)
+ */
+int field_bug_detected;
+int field_null_seen;
+
+SEC("sockops")
+__naked void sock_ops_get_field_same_reg(void)
+{
+ asm volatile (
+ /* Save ctx->is_fullsock in r7 while r1 still holds ctx. */
+ "r7 = *(u32 *)(r1 + %[is_fullsock_off]);"
+ /* Load ctx->snd_cwnd with dst_reg == src_reg == r1. */
+ "r1 = *(u32 *)(r1 + %[snd_cwnd_off]);"
+ /* Full socket: nothing to verify, just exit. */
+ "if r7 != 0 goto 2f;"
+ /* Not a full socket: the converted load must have yielded 0. */
+ "if r1 == 0 goto 1f;"
+ /* Non-zero here means a stale value leaked into r1. */
+ "r1 = %[field_bug_detected] ll;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0) = r2;"
+ "goto 2f;"
+ "1:"
+ "r1 = %[field_null_seen] ll;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0) = r2;"
+ "2:"
+ /* Program return value. */
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
+ __imm_const(snd_cwnd_off, offsetof(struct bpf_sock_ops, snd_cwnd)),
+ __imm_addr(field_bug_detected),
+ __imm_addr(field_null_seen)
+ : __clobber_all);
+}
+
+/* SOCK_OPS_GET_SK: different-register, is_fullsock == 0 path.
+ * Baseline variant: is_fullsock is loaded through r6 and sk through r2,
+ * so dst_reg != src_reg and the rewrite does not need a temporary
+ * register.  Expected to behave like the same-register case.
+ */
+int diff_reg_bug_detected;
+int diff_reg_null_seen;
+
+SEC("sockops")
+__naked void sock_ops_get_sk_diff_reg(void)
+{
+ asm volatile (
+ /* Keep the ctx pointer in r7 so r1 is free for the stores below. */
+ "r7 = r1;"
+ /* r6 <- ctx->is_fullsock, r2 <- ctx->sk: distinct dst/src regs. */
+ "r6 = *(u32 *)(r7 + %[is_fullsock_off]);"
+ "r2 = *(u64 *)(r7 + %[sk_off]);"
+ /* Full socket: nothing to verify, just exit. */
+ "if r6 != 0 goto 2f;"
+ /* Not a full socket: the converted load must have yielded NULL. */
+ "if r2 == 0 goto 1f;"
+ "r1 = %[diff_reg_bug_detected] ll;"
+ "r3 = 1;"
+ "*(u32 *)(r1 + 0) = r3;"
+ "goto 2f;"
+ "1:"
+ "r1 = %[diff_reg_null_seen] ll;"
+ "r3 = 1;"
+ "*(u32 *)(r1 + 0) = r3;"
+ "2:"
+ /* Program return value. */
+ "r0 = 1;"
+ "exit;"
+ :
+ : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
+ __imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)),
+ __imm_addr(diff_reg_bug_detected),
+ __imm_addr(diff_reg_null_seen)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";