From 558e6547e4b8a2b13608a24a9d3679802f91c4c7 Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Mon, 24 Aug 2009 12:19:47 +0800
Subject: tracing/profile: fix profile_disable vs module_unload

If the corresponding module is unloaded before ftrace_profile_disable()
is called, event->profile_disable() won't be called, which can cause an
oops:

  # insmod trace-events-sample.ko
  # perf record -f -a -e sample:foo_bar sleep 3 &
  # sleep 1
  # rmmod trace_events_sample
  # insmod trace-events-sample.ko
  OOPS!

Signed-off-by: Li Zefan
LKML-Reference: <4A9214E3.2070807@cn.fujitsu.com>
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_event_profile.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/trace/trace_event_profile.c')

diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 11ba5bb4ed0a..55a25c933d15 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -5,6 +5,7 @@
  *
  */
 
+#include <linux/module.h>
 #include "trace.h"
 
 int ftrace_profile_enable(int event_id)
@@ -14,7 +15,8 @@ int ftrace_profile_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable) {
+		if (event->id == event_id && event->profile_enable &&
+		    try_module_get(event->mod)) {
 			ret = event->profile_enable(event);
 			break;
 		}
@@ -32,6 +34,7 @@ void ftrace_profile_disable(int event_id)
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
 			event->profile_disable(event);
+			module_put(event->mod);
 			break;
 		}
 	}
--
cgit v1.2.3


From e5e25cf47b0bdd1f7e9b8bb6368ee48e16de0c87 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 18 Sep 2009 00:54:43 +0200
Subject: tracing: Factorize the events profile accounting

Factorize the event enabling accounting into a common tracing core
helper. This reduces the size of the profile_enable() and
profile_disable() callbacks for each trace event.
Signed-off-by: Frederic Weisbecker
Acked-by: Steven Rostedt
Acked-by: Li Zefan
Cc: Peter Zijlstra
Cc: Jason Baron
Cc: Masami Hiramatsu
Cc: Ingo Molnar
---
 kernel/trace/trace_event_profile.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

(limited to 'kernel/trace/trace_event_profile.c')

diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d15..df4a74efd50c 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,14 @@
 #include <linux/module.h>
 #include "trace.h"
 
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+	if (atomic_inc_return(&event->profile_count))
+		return 0;
+
+	return event->profile_enable();
+}
+
 int ftrace_profile_enable(int event_id)
 {
 	struct ftrace_event_call *event;
@@ -17,7 +25,7 @@ mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id && event->profile_enable &&
 		    try_module_get(event->mod)) {
-			ret = event->profile_enable(event);
+			ret = ftrace_profile_enable_event(event);
 			break;
 		}
 	}
@@ -26,6 +34,14 @@
 	return ret;
 }
 
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+	if (!atomic_add_negative(-1, &event->profile_count))
+		return;
+
+	event->profile_disable();
+}
+
 void ftrace_profile_disable(int event_id)
 {
 	struct ftrace_event_call *event;
@@ -33,7 +49,7 @@
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			event->profile_disable(event);
+			ftrace_profile_disable_event(event);
 			module_put(event->mod);
 			break;
 		}
--
cgit v1.2.3


From 20ab4425a77a1f34028cc6ce57053c22c184ba5f Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 18 Sep 2009 06:10:28 +0200
Subject: tracing: Allocate the ftrace event profile buffer dynamically

Currently the trace event profile buffer is allocated on the stack. But
this may be too much for the stack, as the events can have large
statically defined field sizes and can also grow with dynamic arrays.

Allocate two per-cpu buffers shared by all profiled events. The first
buffer hosts all traces from non-NMI context; it is protected by
disabling interrupts while writing and committing the trace. The second
buffer is reserved for NMI context, so that it cannot race with the
first one.

The whole write/commit section is RCU protected because we release
these buffers when the last profiled trace event is deactivated.

v2: Move the buffers from trace_event to be global, as pointed out by
    Steven Rostedt.

v3: Fix the syscall events to handle the profiling buffer races by
    disabling interrupts, now that the buffers are global.
Suggested-by: Steven Rostedt
Signed-off-by: Frederic Weisbecker
Cc: Steven Rostedt
Cc: Peter Zijlstra
Cc: Li Zefan
Cc: Jason Baron
Cc: Masami Hiramatsu
Cc: Ingo Molnar
---
 kernel/trace/trace_event_profile.c | 61 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 1 deletion(-)

(limited to 'kernel/trace/trace_event_profile.c')

diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index df4a74efd50c..3aaa77c3309b 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,12 +8,52 @@
 #include <linux/module.h>
 #include "trace.h"
 
+/*
+ * We can't use a size but a type in alloc_percpu()
+ * So let's create a dummy type that matches the desired size
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+char *trace_profile_buf_nmi;
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
 static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 {
+	char *buf;
+	int ret = -ENOMEM;
+
 	if (atomic_inc_return(&event->profile_count))
 		return 0;
 
-	return event->profile_enable();
+	if (!total_profile_count++) {
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf;
+
+		rcu_assign_pointer(trace_profile_buf, buf);
+
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf_nmi;
+
+		rcu_assign_pointer(trace_profile_buf_nmi, buf);
+	}
+
+	ret = event->profile_enable();
+	if (!ret)
+		return 0;
+
+	kfree(trace_profile_buf_nmi);
+fail_buf_nmi:
+	kfree(trace_profile_buf);
+fail_buf:
+	total_profile_count--;
+	atomic_dec(&event->profile_count);
+
+	return ret;
 }
 
 int ftrace_profile_enable(int event_id)
@@ -36,10 +76,29 @@
 
 static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 {
+	char *buf, *nmi_buf;
+
 	if (!atomic_add_negative(-1, &event->profile_count))
 		return;
 
 	event->profile_disable();
+
+	if (!--total_profile_count) {
+		buf = trace_profile_buf;
+		rcu_assign_pointer(trace_profile_buf, NULL);
+
+		nmi_buf = trace_profile_buf_nmi;
+		rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+		/*
+		 * Ensure every events in profiling have finished before
+		 * releasing the buffers
+		 */
+		synchronize_sched();
+
+		free_percpu(buf);
+		free_percpu(nmi_buf);
+	}
 }
 
 void ftrace_profile_disable(int event_id)
--
cgit v1.2.3


From 05bafda856092de0705de239c846777bddb94974 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Sun, 20 Sep 2009 12:34:38 +0200
Subject: tracing: Export trace_profile_buf symbols

ERROR: "trace_profile_buf_nmi" [fs/jbd2/jbd2.ko] undefined!
ERROR: "trace_profile_buf" [fs/jbd2/jbd2.ko] undefined!
ERROR: "trace_profile_buf_nmi" [fs/ext4/ext4.ko] undefined!
ERROR: "trace_profile_buf" [fs/ext4/ext4.ko] undefined!
ERROR: "trace_profile_buf_nmi" [arch/x86/kvm/kvm.ko] undefined!
ERROR: "trace_profile_buf" [arch/x86/kvm/kvm.ko] undefined!
Signed-off-by: Peter Zijlstra
Acked-by: Steven Rostedt
Cc: Frederic Weisbecker
LKML-Reference: <1253442878.7542.3.camel@laptop>
[ fixed whitespace noise and checkpatch complaint ]
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_event_profile.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/trace/trace_event_profile.c')

diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 3aaa77c3309b..dd44b8768867 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -15,7 +15,10 @@
 typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
 
 char *trace_profile_buf;
-char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
+char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
 
 /* Count the events in use (per event id, not per instance) */
 static int total_profile_count;
--
cgit v1.2.3
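
The patches above only allocate and export trace_profile_buf and
trace_profile_buf_nmi; the code that writes into them lives in the
per-event profile callbacks (the event profile macros and the syscall
tracing path), outside this file. As a rough sketch of the scheme the
changelogs describe -- pick the NMI or the non-NMI buffer, keep
interrupts disabled while building the record, and rely on the
rcu_assign_pointer()/synchronize_sched() pairing in the free path -- a
writer might look like the following. The function name, the record
construction, and the perf hand-off are placeholders, not code taken
from these commits:

/*
 * Illustrative sketch only: how a profile callback is expected to use
 * trace_profile_buf/trace_profile_buf_nmi according to the changelog.
 * The function name and record layout are made up.
 */
#include <linux/hardirq.h>	/* in_nmi() */
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

extern char *trace_profile_buf;
extern char *trace_profile_buf_nmi;

static void my_event_profile_probe(void)
{
	unsigned long flags;
	char *raw_data;
	int cpu;

	/*
	 * IRQs off: nothing (except an NMI) can interrupt us and reuse
	 * the non-NMI buffer on this CPU, and synchronize_sched() in the
	 * disable path waits for this section before freeing the buffers.
	 */
	local_irq_save(flags);

	/* NMI context gets its own buffer so it cannot race with us. */
	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto out;	/* profiling was just disabled */

	cpu = smp_processor_id();
	raw_data = per_cpu_ptr(raw_data, cpu);

	/*
	 * Build the trace record in raw_data (at most
	 * FTRACE_MAX_PROFILE_SIZE bytes) and hand it to perf here.
	 */
out:
	local_irq_restore(flags);
}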