author		Andrii Nakryiko <andrii@kernel.org>	2026-01-30 13:40:09 -0800
committer	Andrii Nakryiko <andrii@kernel.org>	2026-01-30 13:40:09 -0800
commit		c7fbf8d9b808ff3774474be6313b79ea684c61a9 (patch)
tree		d16374fcfa251b231abc1af7aa066f61e0703033
parent		cd77618c418254b827f2a807b4c27b97088fdb52 (diff)
parent		4173b494d93a8057d3ed23e65853cd76b647f870 (diff)
Merge branch 'x86-fgraph-bpf-fix-orc-stack-unwind-from-kprobe_multi'
Jiri Olsa says:

====================
x86/fgraph,bpf: Fix ORC stack unwind from kprobe_multi

hi,
Mahe reported a missing function in the stack trace taken on top of a
kprobe multi program. It turned out the latest fix [1] needs some more
fixing.

v2 changes:
  - keep the unwind the same as for kprobes: the attached function is
    part of the entry probe stacktrace, not the kretprobe one [Steven]
  - several changes in the trigger bench [Andrii]
  - added selftests for standard kprobes and fentry/fexit probes [Andrii]

Note I'll try to add a similar stacktrace adjustment for fentry/fexit
in a separate patchset to not complicate this change.

thanks,
jirka

[1] https://lore.kernel.org/bpf/20251104215405.168643-1-jolsa@kernel.org/
---
====================

Link: https://patch.msgid.link/20260126211837.472802-1-jolsa@kernel.org
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
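For context, a minimal sketch of the scenario being fixed (illustrative only; the map layout and program name here are assumptions, not taken from the patch): a kprobe.multi entry program records a stack trace with bpf_get_stackid(), and with this fix the attached function itself shows up as the first entry of the entry-probe trace.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64));
} stackmap SEC(".maps");

SEC("kprobe.multi")
int sketch_probe(struct pt_regs *ctx)
{
	/* Entry probe: the trace now starts at the attached function.
	 * A kretprobe trace starts at the attached function's caller. */
	bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";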
-rw-r--r--	arch/x86/include/asm/ftrace.h	2
-rw-r--r--	arch/x86/kernel/ftrace_64.S	5
-rw-r--r--	tools/testing/selftests/bpf/bench.c	4
-rw-r--r--	tools/testing/selftests/bpf/bench.h	1
-rw-r--r--	tools/testing/selftests/bpf/benchs/bench_trigger.c	1
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c	120
-rw-r--r--	tools/testing/selftests/bpf/progs/stacktrace_ips.c	27
-rw-r--r--	tools/testing/selftests/bpf/progs/trigger_bench.c	46

8 files changed, 190 insertions, 16 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b08c95872eed..c56e1e63b893 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -57,7 +57,7 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
}
#define arch_ftrace_partial_regs(regs) do { \
- regs->flags &= ~X86_EFLAGS_FIXED; \
+ regs->flags |= X86_EFLAGS_FIXED; \
regs->cs = __KERNEL_CS; \
} while (0)
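The one-character change above is easy to misread: X86_EFLAGS_FIXED is bit 1 of EFLAGS, which the architecture reserves to always read as 1, so a synthesized pt_regs needs that bit set, not cleared. For reference, its definition as it appears (at the time of writing) in arch/x86/include/uapi/asm/processor-flags.h:

#define X86_EFLAGS_FIXED_BIT	1 /* Bit 1 - always on */
#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)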
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index a132608265f6..62c1c93aa1c6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -364,6 +364,9 @@ SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
+ /* Store original rsp for pt_regs.sp value. */
+ movq %rsp, %rdi
+
/* Restore return_to_handler value that got eaten by previous ret instruction. */
subq $8, %rsp
UNWIND_HINT_FUNC
@@ -374,7 +377,7 @@ SYM_CODE_START(return_to_handler)
movq %rax, RAX(%rsp)
movq %rdx, RDX(%rsp)
movq %rbp, RBP(%rsp)
- movq %rsp, RSP(%rsp)
+ movq %rdi, RSP(%rsp)
movq %rsp, %rdi
call ftrace_return_to_handler
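Why the extra register shuffle matters: the ORC unwinder starts walking from pt_regs->sp, and once return_to_handler has re-pushed the eaten return slot and built its save area, %rsp no longer names the caller's stack pointer, so it has to be captured first (here into %rdi, which is caller-saved and immediately overwritten afterwards anyway). A toy model of the resulting off-by-a-frame error (plain C for illustration only; the frame size is made up):

#include <stdint.h>
#include <stdio.h>

#define SAVE_AREA	168	/* hypothetical pt_regs frame size */

int main(void)
{
	uint64_t rsp_at_entry = 0x7fffffffe000ULL;	/* caller's sp */
	/* 8 bytes for the re-pushed return slot, then the save area: */
	uint64_t rsp_after_frame = rsp_at_entry - 8 - SAVE_AREA;

	/* Pre-fix, RSP(%rsp) recorded the post-frame value: */
	printf("bad  start: %#llx\n", (unsigned long long)rsp_after_frame);
	/* Post-fix, it records the entry value captured via %rdi: */
	printf("good start: %#llx\n", (unsigned long long)rsp_at_entry);
	return 0;
}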
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index bd29bb2e6cb5..8368bd3a0665 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -265,6 +265,7 @@ static const struct argp_option opts[] = {
{ "verbose", 'v', NULL, 0, "Verbose debug output"},
{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
{ "quiet", 'q', NULL, 0, "Be more quiet"},
+ { "stacktrace", 's', NULL, 0, "Get stack trace"},
{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
"Set of CPUs for producer threads; implies --affinity"},
{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
@@ -350,6 +351,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 'q':
env.quiet = true;
break;
+ case 's':
+ env.stacktrace = true;
+ break;
case ARG_PROD_AFFINITY_SET:
env.affinity = true;
if (parse_num_list(arg, &env.prod_cpus.cpus,
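Usage note (benchmark names assumed from the existing trigger benches in this directory): the new switch makes any trigger benchmark take a stack trace on every hit, e.g.

  ./bench trig-kprobe		# baseline
  ./bench -s trig-kprobe	# same workload, plus bpf_get_stack()

which is what makes the unwind path exercised by this series measurable.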
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index bea323820ffb..7cf21936e7ed 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -26,6 +26,7 @@ struct env {
bool list;
bool affinity;
bool quiet;
+ bool stacktrace;
int consumer_cnt;
int producer_cnt;
int nr_cpus;
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 34018fc3927f..aeec9edd3851 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -146,6 +146,7 @@ static void setup_ctx(void)
bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true);
ctx.skel->rodata->batch_iters = args.batch_iters;
+ ctx.skel->rodata->stacktrace = env.stacktrace;
}
static void load_ctx(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
index c9efdd2a5b18..da42b00e3d1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -74,11 +74,20 @@ static void test_stacktrace_ips_kprobe_multi(bool retprobe)
load_kallsyms();
- check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
- ksym_get_addr("bpf_testmod_stacktrace_test_3"),
- ksym_get_addr("bpf_testmod_stacktrace_test_2"),
- ksym_get_addr("bpf_testmod_stacktrace_test_1"),
- ksym_get_addr("bpf_testmod_test_read"));
+ if (retprobe) {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ } else {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+ ksym_get_addr("bpf_testmod_stacktrace_test"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ }
cleanup:
stacktrace_ips__destroy(skel);
@@ -128,6 +137,99 @@ cleanup:
stacktrace_ips__destroy(skel);
}
+static void test_stacktrace_ips_kprobe(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_opts, opts,
+ .retprobe = retprobe
+ );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct stacktrace_ips *skel;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.kprobe_test = bpf_program__attach_kprobe_opts(
+ skel->progs.kprobe_test,
+ "bpf_testmod_stacktrace_test", &opts);
+ if (!ASSERT_OK_PTR(skel->links.kprobe_test, "bpf_program__attach_kprobe_opts"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ if (retprobe) {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ } else {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+ ksym_get_addr("bpf_testmod_stacktrace_test"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ }
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void test_stacktrace_ips_trampoline(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct stacktrace_ips *skel;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ if (retprobe) {
+ skel->links.fexit_test = bpf_program__attach_trace(skel->progs.fexit_test);
+ if (!ASSERT_OK_PTR(skel->links.fexit_test, "bpf_program__attach_trace"))
+ goto cleanup;
+ } else {
+ skel->links.fentry_test = bpf_program__attach_trace(skel->progs.fentry_test);
+ if (!ASSERT_OK_PTR(skel->links.fentry_test, "bpf_program__attach_trace"))
+ goto cleanup;
+ }
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ if (retprobe) {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ } else {
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+ ksym_get_addr("bpf_testmod_stacktrace_test"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+ }
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
static void __test_stacktrace_ips(void)
{
if (test__start_subtest("kprobe_multi"))
@@ -136,6 +238,14 @@ static void __test_stacktrace_ips(void)
test_stacktrace_ips_kprobe_multi(true);
if (test__start_subtest("raw_tp"))
test_stacktrace_ips_raw_tp();
+ if (test__start_subtest("kprobe"))
+ test_stacktrace_ips_kprobe(false);
+ if (test__start_subtest("kretprobe"))
+ test_stacktrace_ips_kprobe(true);
+ if (test__start_subtest("fentry"))
+ test_stacktrace_ips_trampoline(false);
+ if (test__start_subtest("fexit"))
+ test_stacktrace_ips_trampoline(true);
}
#else
static void __test_stacktrace_ips(void)
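For readers unfamiliar with the helper: stack_key, as written by the BPF programs, is an id into the BPF_MAP_TYPE_STACK_TRACE map, so a check_stacktrace_ips()-style check reduces to a map lookup plus an IP comparison. A minimal sketch of that shape (hypothetical helper; the selftest's real version may anchor the comparison differently, e.g. tolerating extra leading frames):

#include <bpf/bpf.h>

#define MAX_DEPTH 127

/* Return 1 if the trace stored under `stack_key` begins with the
 * `exp_cnt` expected instruction pointers, 0 otherwise. */
static int stack_matches(int map_fd, __u32 stack_key,
			 const __u64 *exp, int exp_cnt)
{
	__u64 ips[MAX_DEPTH] = {};
	int i;

	if (bpf_map_lookup_elem(map_fd, &stack_key, ips))
		return 0;

	for (i = 0; i < exp_cnt; i++) {
		if (ips[i] != exp[i])
			return 0;
	}
	return 1;
}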
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
index a96c8150d7f5..6830f2978613 100644
--- a/tools/testing/selftests/bpf/progs/stacktrace_ips.c
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -31,6 +31,13 @@ int unused(void)
__u32 stack_key;
+SEC("kprobe")
+int kprobe_test(struct pt_regs *ctx)
+{
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
@@ -46,4 +53,24 @@ int rawtp_test(void *ctx)
return 0;
}
+SEC("fentry/bpf_testmod_stacktrace_test")
+int fentry_test(struct pt_regs *ctx)
+{
+ /*
+ * Skip 2 bpf_program/trampoline stack entries:
+ * - bpf_prog_bd1f7a949f55fb03_fentry_test
+ * - bpf_trampoline_182536277701
+ */
+ stack_key = bpf_get_stackid(ctx, &stackmap, 2);
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_stacktrace_test")
+int fexit_test(struct pt_regs *ctx)
+{
+ /* Skip 2 bpf_program/trampoline stack entries, check fentry_test. */
+ stack_key = bpf_get_stackid(ctx, &stackmap, 2);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
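The literal 2 passed above relies on the frame-skip encoding of bpf_get_stackid(): the low byte of the flags argument (BPF_F_SKIP_FIELD_MASK in the UAPI header) is the number of leading frames to drop before the trace is stored, which here removes the bpf_prog and bpf_trampoline entries. Spelled out with error handling (a sketch reusing the file's stackmap and stack_key, not part of the patch):

SEC("fentry/bpf_testmod_stacktrace_test")
int fentry_sketch(struct pt_regs *ctx)
{
	/* flags & BPF_F_SKIP_FIELD_MASK == number of frames to skip */
	long id = bpf_get_stackid(ctx, &stackmap, 2);

	if (id >= 0)		/* negative means the unwind failed */
		stack_key = id;
	return 0;
}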
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 2898b3749d07..4ea0422d1042 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -25,6 +25,34 @@ static __always_inline void inc_counter(void)
__sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
}
+volatile const int stacktrace;
+
+typedef __u64 stack_trace_t[128];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stack_heap SEC(".maps");
+
+static __always_inline void do_stacktrace(void *ctx)
+{
+ if (!stacktrace)
+ return;
+
+ __u64 *ptr = bpf_map_lookup_elem(&stack_heap, &(__u32){0});
+
+ if (ptr)
+ bpf_get_stack(ctx, ptr, sizeof(stack_trace_t), 0);
+}
+
+static __always_inline void handle(void *ctx)
+{
+ inc_counter();
+ do_stacktrace(ctx);
+}
+
SEC("?uprobe")
int bench_trigger_uprobe(void *ctx)
{
@@ -81,21 +109,21 @@ int trigger_driver_kfunc(void *ctx)
SEC("?kprobe/bpf_get_numa_node_id")
int bench_trigger_kprobe(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
SEC("?kretprobe/bpf_get_numa_node_id")
int bench_trigger_kretprobe(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kprobe_multi(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
@@ -108,7 +136,7 @@ int bench_kprobe_multi_empty(void *ctx)
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kretprobe_multi(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
@@ -121,34 +149,34 @@ int bench_kretprobe_multi_empty(void *ctx)
SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
SEC("?fexit/bpf_get_numa_node_id")
int bench_trigger_fexit(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
SEC("?fmod_ret/bpf_modify_return_test_tp")
int bench_trigger_fmodret(void *ctx)
{
- inc_counter();
+ handle(ctx);
return -22;
}
SEC("?tp/bpf_test_run/bpf_trigger_tp")
int bench_trigger_tp(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
SEC("?raw_tp/bpf_trigger_tp")
int bench_trigger_rawtp(void *ctx)
{
- inc_counter();
+ handle(ctx);
return 0;
}
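A note on the stack_heap map introduced above: bpf_get_stack() is handed a 128-entry buffer (128 * 8 = 1024 bytes), which cannot live on the 512-byte BPF program stack, so the benchmark borrows per-CPU map memory as scratch space instead. The naive alternative would be rejected by the verifier (sketch):

	/* Would fail to load: 1024 bytes on the 512-byte BPF stack. */
	__u64 stack[128];

	bpf_get_stack(ctx, stack, sizeof(stack), 0);

The collected traces are deliberately discarded; the benchmark only measures the cost of taking them.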