path: root/kernel/trace/fgraph.c
author    Steven Rostedt (Google) <rostedt@goodmis.org>    2024-06-03 15:07:17 -0400
committer Steven Rostedt (Google) <rostedt@goodmis.org>    2024-06-04 10:37:11 -0400
commit    df3ec5da6a1e7f6e142680d7c5266d3af187170b (patch)
tree      4ba7160fda7f476c0a6fb6201aa5ebbeb1af26fe /kernel/trace/fgraph.c
parent    c132be2c4fcc1150ad0791c2a85dd4c9ad0bd0c8 (diff)
function_graph: Add pid tracing back to function graph tracer
Now that the function_graph has a main callback that handles the function
graph subops tracing, it no longer honors the pid filtering of ftrace. Add
back this logic in the function_graph code to update the gops callback for
the entry function to test if it should trace the current task or not.

Link: https://lore.kernel.org/linux-trace-kernel/20240603190822.991720703@goodmis.org

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
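The fix follows a common ftrace pattern: the subops' real entry callback is kept in gops->saved_func, and while pid filtering is enabled a small wrapper (fgraph_pid_func in the diff below) is installed as gops->entryfunc; the wrapper checks the per-CPU ftrace_ignore_pid value and only chains to the saved callback when the current task passes the filter. A minimal, userspace-only sketch of that saved-callback/wrapper swap, with hypothetical names throughout (pid_ops, pid_wrapper, update_pid_filter are illustrations, not kernel symbols):

#include <stdio.h>

struct pid_ops {
	int (*entryfunc)(int pid);	/* callback actually invoked */
	int (*saved_func)(int pid);	/* the real callback, kept aside */
	int filter_pid;			/* -1 means trace every task */
};

static int real_entry(int pid)
{
	printf("trace pid %d\n", pid);
	return 1;
}

static struct pid_ops ops = { .saved_func = real_entry, .filter_pid = -1 };

/* Installed only while filtering is on; mirrors the shape of fgraph_pid_func. */
static int pid_wrapper(int pid)
{
	if (ops.filter_pid != -1 && pid != ops.filter_pid)
		return 0;		/* wrong task: skip tracing */
	return ops.saved_func(pid);	/* right task: chain to the real callback */
}

/* Mirrors fgraph_update_pid_func(): swap callbacks, never lose the saved one. */
static void update_pid_filter(int enabled)
{
	ops.entryfunc = enabled ? pid_wrapper : ops.saved_func;
}

int main(void)
{
	update_pid_filter(0);	/* filtering off: calls go straight through */
	ops.entryfunc(7);	/* traced */

	ops.filter_pid = 42;
	update_pid_filter(1);	/* filtering on: wrapper installed */
	ops.entryfunc(42);	/* traced */
	ops.entryfunc(7);	/* silently skipped */
	return 0;
}

Keeping saved_func separate means the wrapper can be installed and removed any number of times without losing the user's callback; the patch below also clears saved_func on unregister and on a failed register so a stale pointer cannot leak into a later registration.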
Diffstat (limited to 'kernel/trace/fgraph.c')
-rw-r--r--	kernel/trace/fgraph.c	40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 3ef6db53c0bf..30bed20c655f 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -854,6 +854,41 @@ void ftrace_graph_exit_task(struct task_struct *t)
 	kfree(ret_stack);
 }
 
+static int fgraph_pid_func(struct ftrace_graph_ent *trace,
+			   struct fgraph_ops *gops)
+{
+	struct trace_array *tr = gops->ops.private;
+	int pid;
+
+	if (tr) {
+		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+		if (pid == FTRACE_PID_IGNORE)
+			return 0;
+		if (pid != FTRACE_PID_TRACE &&
+		    pid != current->pid)
+			return 0;
+	}
+
+	return gops->saved_func(trace, gops);
+}
+
+void fgraph_update_pid_func(void)
+{
+	struct fgraph_ops *gops;
+	struct ftrace_ops *op;
+
+	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
+		return;
+
+	list_for_each_entry(op, &graph_ops.subop_list, list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			gops = container_of(op, struct fgraph_ops, ops);
+			gops->entryfunc = ftrace_pids_enabled(op) ?
+				fgraph_pid_func : gops->saved_func;
+		}
+	}
+}
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
@@ -931,11 +966,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 		command = FTRACE_START_FUNC_RET;
 	}
 
+	/* Always save the function, and reset at unregistering */
+	gops->saved_func = gops->entryfunc;
+
 	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
 error:
 	if (ret) {
 		fgraph_array[i] = &fgraph_stub;
 		ftrace_graph_active--;
+		gops->saved_func = NULL;
 	}
 out:
 	mutex_unlock(&ftrace_lock);
@@ -979,5 +1018,6 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
 		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 	}
 out:
+	gops->saved_func = NULL;
 	mutex_unlock(&ftrace_lock);
 }
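With pid filtering honored again, the usual tracefs workflow applies to the function_graph tracer. A rough userspace sketch of how one might exercise it (assumes a standard /sys/kernel/tracing mount and sufficient privileges; write_file is a local helper defined here, not a kernel or libc API):

#include <stdio.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	char pid[32];

	snprintf(pid, sizeof(pid), "%d\n", (int)getpid());

	/* Restrict ftrace to this pid, then turn on the graph tracer. */
	if (write_file("/sys/kernel/tracing/set_ftrace_pid", pid) ||
	    write_file("/sys/kernel/tracing/current_tracer", "function_graph\n"))
		return 1;

	usleep(10000);	/* generate some kernel activity to trace */

	/* Disable tracing before reading the results. */
	write_file("/sys/kernel/tracing/current_tracer", "nop\n");
	return 0;
}

Afterwards, /sys/kernel/tracing/trace should contain graph entries only for the filtered pid; without this patch, the function_graph tracer ignored set_ftrace_pid and traced every task.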