author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>   2008-05-12 21:21:10 +0200
committer  Thomas Gleixner <tglx@linutronix.de>               2008-05-23 22:29:25 +0200
commit     5b82a1b08a00b2adca3d9dd9777efff40b7aaaa1 (patch)
tree       4dcce4af592ca177bee3dfeb34f9b482d142e713 /kernel
parent     0aa977f592f17004f9d1d545f2e1bb9ea71896c3 (diff)
Port ftrace to markers
Porting ftrace to the marker infrastructure.

There is no need to chain to the wakeup tracer from the sched tracer
anymore, because markers support multiple probes connected to the same
marker.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
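[Editorial note] The patch builds on the 2.6.26-era marker API from
<linux/marker.h>. A minimal sketch of the pattern, with hypothetical
marker and probe names (subsys_event, probe_event, subsys_do_work): an
instrumentation site is a trace_mark() that stays a near-nop until a
probe attaches, probes receive the marked arguments through a va_list,
and several probes can attach to one marker — which is why the sched
tracer below no longer has to chain into the wakeup tracer.

#include <linux/marker.h>
#include <linux/module.h>

/* Instrumentation site: cheap until a probe is registered. */
void subsys_do_work(int pid)
{
	trace_mark(subsys_event, "pid %d", pid);
}

/* Probe: arguments arrive as a va_list matching the format string. */
static void probe_event(void *probe_data, void *call_data,
			const char *format, va_list *args)
{
	int pid = va_arg(*args, int);
	/* ... record pid ... */
}

/* Attach; a second probe registered on "subsys_event" would also fire. */
static int __init subsys_attach(void)
{
	return marker_probe_register("subsys_event", "pid %d",
				     probe_event, NULL);
}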
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c                       14
-rw-r--r--  kernel/trace/trace.h                 20
-rw-r--r--  kernel/trace/trace_sched_switch.c   171
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   106
4 files changed, 255 insertions(+), 56 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ad95cca4e42e..e2e985eeee78 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2500,7 +2500,9 @@ out_activate:
success = 1;
out_running:
- ftrace_wake_up_task(rq, p, rq->curr);
+ trace_mark(kernel_sched_wakeup,
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ p->pid, p->state, rq, p, rq->curr);
check_preempt_curr(rq, p);
p->state = TASK_RUNNING;
@@ -2631,7 +2633,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
p->sched_class->task_new(rq, p);
inc_nr_running(rq);
}
- ftrace_wake_up_task(rq, p, rq->curr);
+ trace_mark(kernel_sched_wakeup_new,
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ p->pid, p->state, rq, p, rq->curr);
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
@@ -2804,7 +2808,11 @@ context_switch(struct rq *rq, struct task_struct *prev,
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
- ftrace_ctx_switch(rq, prev, next);
+ trace_mark(kernel_sched_schedule,
+ "prev_pid %d next_pid %d prev_state %ld "
+ "## rq %p prev %p next %p",
+ prev->pid, next->pid, prev->state,
+ rq, prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8845033ab49d..f5de0601b408 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -234,25 +234,10 @@ void update_max_tr_single(struct trace_array *tr,
extern cycle_t ftrace_now(int cpu);
-#ifdef CONFIG_SCHED_TRACER
-extern void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
-extern void
-wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
-#else
-static inline void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
-{
-}
-static inline void
-wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
-{
-}
-#endif
-
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
+ void *__rq,
struct task_struct *prev,
struct task_struct *next);
@@ -262,9 +247,6 @@ struct tracer_switch_ops {
struct tracer_switch_ops *next;
};
-extern int register_tracer_switch(struct tracer_switch_ops *ops);
-extern int unregister_tracer_switch(struct tracer_switch_ops *ops);
-
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a3376478fc2c..d25ffa5eaf2b 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,11 +16,14 @@
static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
+static atomic_t sched_ref;
static void
-ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
+sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+ struct task_struct *next)
{
- struct trace_array *tr = ctx_trace;
+ struct trace_array **ptr = private;
+ struct trace_array *tr = *ptr;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
@@ -41,10 +44,40 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
local_irq_restore(flags);
}
+static notrace void
+sched_switch_callback(void *probe_data, void *call_data,
+ const char *format, va_list *args)
+{
+ struct task_struct *prev;
+ struct task_struct *next;
+ struct rq *__rq;
+
+ if (!atomic_read(&sched_ref))
+ return;
+
+ /* skip prev_pid %d next_pid %d prev_state %ld */
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, long);
+ __rq = va_arg(*args, typeof(__rq));
+ prev = va_arg(*args, typeof(prev));
+ next = va_arg(*args, typeof(next));
+
+ tracing_record_cmdline(prev);
+
+ /*
+ * If tracer_switch_func only points to the local
+ * switch func, it still needs the ptr passed to it.
+ */
+ sched_switch_func(probe_data, __rq, prev, next);
+}
+
static void
-wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
+wakeup_func(void *private, void *__rq, struct task_struct *wakee,
+	    struct task_struct *curr)
{
- struct trace_array *tr = ctx_trace;
+ struct trace_array **ptr = private;
+ struct trace_array *tr = *ptr;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
@@ -67,35 +100,29 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
local_irq_restore(flags);
}
-void
-ftrace_ctx_switch(void *__rq, struct task_struct *prev,
- struct task_struct *next)
+static notrace void
+wake_up_callback(void *probe_data, void *call_data,
+ const char *format, va_list *args)
{
- if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
- tracing_record_cmdline(prev);
+ struct task_struct *curr;
+ struct task_struct *task;
+ struct rq *__rq;
- /*
- * If tracer_switch_func only points to the local
- * switch func, it still needs the ptr passed to it.
- */
- ctx_switch_func(__rq, prev, next);
+ if (likely(!tracer_enabled))
+ return;
- /*
- * Chain to the wakeup tracer (this is a NOP if disabled):
- */
- wakeup_sched_switch(prev, next);
-}
+ /* Skip pid %d state %ld */
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, long);
+ /* now get the meat: "rq %p task %p rq->curr %p" */
+ __rq = va_arg(*args, typeof(__rq));
+ task = va_arg(*args, typeof(task));
+ curr = va_arg(*args, typeof(curr));
-void
-ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
- struct task_struct *curr)
-{
- wakeup_func(__rq, wakee, curr);
+ tracing_record_cmdline(task);
+ tracing_record_cmdline(curr);
- /*
- * Chain to the wakeup tracer (this is a NOP if disabled):
- */
- wakeup_sched_wakeup(wakee, curr);
+ wakeup_func(probe_data, __rq, task, curr);
}
void
@@ -132,15 +159,95 @@ static void sched_switch_reset(struct trace_array *tr)
tracing_reset(tr->data[cpu]);
}
+static int tracing_sched_register(void)
+{
+ int ret;
+
+ ret = marker_probe_register("kernel_sched_wakeup",
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ wake_up_callback,
+ &ctx_trace);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't add marker"
+ " probe to kernel_sched_wakeup\n");
+ return ret;
+ }
+
+ ret = marker_probe_register("kernel_sched_wakeup_new",
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ wake_up_callback,
+ &ctx_trace);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't add marker"
+ " probe to kernel_sched_wakeup_new\n");
+ goto fail_deprobe;
+ }
+
+ ret = marker_probe_register("kernel_sched_schedule",
+ "prev_pid %d next_pid %d prev_state %ld "
+ "## rq %p prev %p next %p",
+ sched_switch_callback,
+ &ctx_trace);
+ if (ret) {
+ pr_info("sched trace: Couldn't add marker"
+ " probe to kernel_sched_schedule\n");
+ goto fail_deprobe_wake_new;
+ }
+
+ return ret;
+fail_deprobe_wake_new:
+ marker_probe_unregister("kernel_sched_wakeup_new",
+ wake_up_callback,
+ &ctx_trace);
+fail_deprobe:
+ marker_probe_unregister("kernel_sched_wakeup",
+ wake_up_callback,
+ &ctx_trace);
+ return ret;
+}
+
+static void tracing_sched_unregister(void)
+{
+ marker_probe_unregister("kernel_sched_schedule",
+ sched_switch_callback,
+ &ctx_trace);
+ marker_probe_unregister("kernel_sched_wakeup_new",
+ wake_up_callback,
+ &ctx_trace);
+ marker_probe_unregister("kernel_sched_wakeup",
+ wake_up_callback,
+ &ctx_trace);
+}
+
+void tracing_start_sched_switch(void)
+{
+ long ref;
+
+ ref = atomic_inc_return(&sched_ref);
+ if (ref == 1)
+ tracing_sched_register();
+}
+
+void tracing_stop_sched_switch(void)
+{
+ long ref;
+
+ ref = atomic_dec_and_test(&sched_ref);
+ if (ref)
+ tracing_sched_unregister();
+}
+
static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
atomic_inc(&trace_record_cmdline_enabled);
tracer_enabled = 1;
+ tracing_start_sched_switch();
}
static void stop_sched_trace(struct trace_array *tr)
{
+ tracing_stop_sched_switch();
atomic_dec(&trace_record_cmdline_enabled);
tracer_enabled = 0;
}
@@ -181,6 +288,14 @@ static struct tracer sched_switch_trace __read_mostly =
__init static int init_sched_switch_trace(void)
{
+ int ret = 0;
+
+ if (atomic_read(&sched_ref))
+ ret = tracing_sched_register();
+ if (ret) {
+ pr_info("error registering scheduler trace\n");
+ return ret;
+ }
return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
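[Editorial note] A condensed restatement of the refcount idiom added
above (illustrative, not part of the patch): only the 0 -> 1 transition
registers the marker probes and only the 1 -> 0 transition unregisters
them, so paired start/stop calls nest safely across users.

static atomic_t sched_ref = ATOMIC_INIT(0);

void tracing_start_sched_switch(void)
{
	if (atomic_inc_return(&sched_ref) == 1)	/* 0 -> 1: first user */
		tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
	/* atomic_dec_and_test() returns true once the count reaches zero */
	if (atomic_dec_and_test(&sched_ref))	/* 1 -> 0: last user */
		tracing_sched_unregister();
}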
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5948011006bc..5d2fb48e47f8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,6 +15,7 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
+#include <linux/marker.h>
#include "trace.h"
@@ -44,11 +45,13 @@ static int report_latency(cycle_t delta)
return 1;
}
-void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
+static void notrace
+wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+ struct task_struct *next)
{
unsigned long latency = 0, t0 = 0, t1 = 0;
- struct trace_array *tr = wakeup_trace;
+ struct trace_array **ptr = private;
+ struct trace_array *tr = *ptr;
struct trace_array_cpu *data;
cycle_t T0, T1, delta;
unsigned long flags;
@@ -113,6 +116,31 @@ out:
atomic_dec(&tr->data[cpu]->disabled);
}
+static notrace void
+sched_switch_callback(void *probe_data, void *call_data,
+ const char *format, va_list *args)
+{
+ struct task_struct *prev;
+ struct task_struct *next;
+ struct rq *__rq;
+
+ /* skip prev_pid %d next_pid %d prev_state %ld */
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, long);
+ __rq = va_arg(*args, typeof(__rq));
+ prev = va_arg(*args, typeof(prev));
+ next = va_arg(*args, typeof(next));
+
+ tracing_record_cmdline(prev);
+
+ /*
+ * If tracer_switch_func only points to the local
+ * switch func, it still needs the ptr passed to it.
+ */
+ wakeup_sched_switch(probe_data, __rq, prev, next);
+}
+
static void __wakeup_reset(struct trace_array *tr)
{
struct trace_array_cpu *data;
@@ -188,19 +216,68 @@ out:
atomic_dec(&tr->data[cpu]->disabled);
}
-void wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
+static notrace void
+wake_up_callback(void *probe_data, void *call_data,
+ const char *format, va_list *args)
{
+ struct trace_array **ptr = probe_data;
+ struct trace_array *tr = *ptr;
+ struct task_struct *curr;
+ struct task_struct *task;
+ struct rq *__rq;
+
if (likely(!tracer_enabled))
return;
+ /* Skip pid %d state %ld */
+ (void)va_arg(*args, int);
+ (void)va_arg(*args, long);
+ /* now get the meat: "rq %p task %p rq->curr %p" */
+ __rq = va_arg(*args, typeof(__rq));
+ task = va_arg(*args, typeof(task));
+ curr = va_arg(*args, typeof(curr));
+
+ tracing_record_cmdline(task);
tracing_record_cmdline(curr);
- tracing_record_cmdline(wakee);
- wakeup_check_start(wakeup_trace, wakee, curr);
+ wakeup_check_start(tr, task, curr);
}
static void start_wakeup_tracer(struct trace_array *tr)
{
+ int ret;
+
+ ret = marker_probe_register("kernel_sched_wakeup",
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ wake_up_callback,
+ &wakeup_trace);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't add marker"
+ " probe to kernel_sched_wakeup\n");
+ return;
+ }
+
+ ret = marker_probe_register("kernel_sched_wakeup_new",
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
+ wake_up_callback,
+ &wakeup_trace);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't add marker"
+ " probe to kernel_sched_wakeup_new\n");
+ goto fail_deprobe;
+ }
+
+ ret = marker_probe_register("kernel_sched_schedule",
+ "prev_pid %d next_pid %d prev_state %ld "
+ "## rq %p prev %p next %p",
+ sched_switch_callback,
+ &wakeup_trace);
+ if (ret) {
+ pr_info("sched trace: Couldn't add marker"
+ " probe to kernel_sched_schedule\n");
+ goto fail_deprobe_wake_new;
+ }
+
wakeup_reset(tr);
/*
@@ -215,11 +292,28 @@ static void start_wakeup_tracer(struct trace_array *tr)
tracer_enabled = 1;
return;
+fail_deprobe_wake_new:
+ marker_probe_unregister("kernel_sched_wakeup_new",
+ wake_up_callback,
+ &wakeup_trace);
+fail_deprobe:
+ marker_probe_unregister("kernel_sched_wakeup",
+ wake_up_callback,
+ &wakeup_trace);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
+ marker_probe_unregister("kernel_sched_schedule",
+ sched_switch_callback,
+ &wakeup_trace);
+ marker_probe_unregister("kernel_sched_wakeup_new",
+ wake_up_callback,
+ &wakeup_trace);
+ marker_probe_unregister("kernel_sched_wakeup",
+ wake_up_callback,
+ &wakeup_trace);
}
static void wakeup_tracer_init(struct trace_array *tr)