author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-20 15:54:37 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-20 15:54:37 -0700
commit    467f9957d9283be40101d7255d06fae7e211ff4c (patch)
tree      71d155ab52b3a78bc88d0c8088b09b3c37f9357a /include
parent    78f28b7c555359c67c2a0d23f7436e915329421e (diff)
parent    cdf8073d6b2c6c5a3cd6ce0e6c1297157f7f99ba (diff)
Merge branch 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (58 commits)
  perf_counter: Fix perf_copy_attr() pointer arithmetic
  perf utils: Use a define for the maximum length of a trace event
  perf: Add timechart help text and add timechart to "perf help"
  tracing, x86, cpuidle: Move the end point of a C state in the power tracer
  perf utils: Be consistent about minimum text size in the svghelper
  perf timechart: Add "perf timechart record"
  perf: Add the timechart tool
  perf: Add a SVG helper library file
  tracing, perf: Convert the power tracer into an event tracer
  perf: Add a sample_event type to the event_union
  perf: Allow perf utilities to have "callback" options without arguments
  perf: Store trace event name/id pairs in perf.data
  perf: Add a timestamp to fork events
  sched_clock: Make it NMI safe
  perf_counter: Fix up swcounter throttling
  x86, perf_counter, bts: Optimize BTS overflow handling
  perf sched: Add --input=file option to builtin-sched.c
  perf trace: Sample timestamp and cpu when using record flag
  perf tools: Increase MAX_EVENT_LENGTH
  perf tools: Fix memory leak in read_ftrace_printk()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/perf_counter.h  80
-rw-r--r--  include/trace/events/power.h  81
-rw-r--r--  include/trace/events/sched.h  33
3 files changed, 188 insertions(+), 6 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 972f90d7a32f..bd341007c4fc 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -199,10 +199,14 @@ struct perf_counter_attr {
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
task : 1, /* trace fork/exit */
+ watermark : 1, /* wakeup_watermark */
- __reserved_1 : 50;
+ __reserved_1 : 49;
- __u32 wakeup_events; /* wakeup every n events */
+ union {
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 wakeup_watermark; /* bytes before wakeup */
+ };
__u32 __reserved_2;
__u64 __reserved_3;
@@ -332,6 +336,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
+ * u64 time;
* };
*/
PERF_EVENT_EXIT = 4,
@@ -352,6 +357,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
+ * { u64 time; } && PERF_SAMPLE_TIME
* };
*/
PERF_EVENT_FORK = 7,
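
Together with the PERF_EVENT_EXIT comment above, this documents the record
layout a reader sees in the mmap buffer. A hypothetical reader-side mirror of
the fork record, following the layout exactly as the comment describes it:

	struct fork_event_record {
		struct perf_event_header header;
		__u32 pid, ppid;
		__u32 tid, ptid;
		__u64 time;	/* only present when attr.sample_type
				 * includes PERF_SAMPLE_TIME */
	};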
@@ -521,6 +527,8 @@ struct perf_mmap_data {
atomic_t wakeup; /* needs a wakeup */
atomic_t lost; /* nr records lost */
+ long watermark; /* wakeup watermark */
+
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
};
@@ -685,6 +693,17 @@ struct perf_cpu_context {
int recursion[4];
};
+struct perf_output_handle {
+ struct perf_counter *counter;
+ struct perf_mmap_data *data;
+ unsigned long head;
+ unsigned long offset;
+ int nmi;
+ int sample;
+ int locked;
+ unsigned long flags;
+};
+
#ifdef CONFIG_PERF_COUNTERS
/*
@@ -716,16 +735,38 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
extern void perf_counter_update_userpage(struct perf_counter *counter);
struct perf_sample_data {
- struct pt_regs *regs;
+ u64 type;
+
+ u64 ip;
+ struct {
+ u32 pid;
+ u32 tid;
+ } tid_entry;
+ u64 time;
u64 addr;
+ u64 id;
+ u64 stream_id;
+ struct {
+ u32 cpu;
+ u32 reserved;
+ } cpu_entry;
u64 period;
+ struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
};
+extern void perf_output_sample(struct perf_output_handle *handle,
+ struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter);
+extern void perf_prepare_sample(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter,
+ struct pt_regs *regs);
+
extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
- struct perf_sample_data *data);
-extern void perf_counter_output(struct perf_counter *counter, int nmi,
- struct perf_sample_data *data);
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
/*
* Return 1 for a software counter, 0 for a hardware counter
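
With this hunk, sample emission is split in two: perf_prepare_sample() sizes
the record from the counter's sample_type, and perf_output_sample() writes it.
A sketch of a caller under that split; the surrounding interrupt context and
the initializer values are illustrative, not from this patch:

	static void sample_sketch(struct perf_counter *counter, int nmi,
				  struct pt_regs *regs)
	{
		struct perf_sample_data data = { .addr = 0, .period = 1 };
		struct perf_event_header header;
		struct perf_output_handle handle;

		/* size the record according to counter->attr.sample_type */
		perf_prepare_sample(&header, &data, counter, regs);

		/* reserve header.size bytes; non-zero return means no space */
		if (perf_output_begin(&handle, counter, header.size, nmi, 1))
			return;

		perf_output_sample(&handle, &header, &data, counter);
		perf_output_end(&handle);
	}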
@@ -775,6 +816,12 @@ extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
#define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
+extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_counter *counter, unsigned int size,
+ int nmi, int sample);
+extern void perf_output_end(struct perf_output_handle *handle);
+extern void perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len);
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -801,7 +848,28 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
+
+static inline int
+perf_output_begin(struct perf_output_handle *handle, struct perf_counter *c,
+ unsigned int size, int nmi, int sample) { return -1; }
+static inline void perf_output_end(struct perf_output_handle *handle) { }
+static inline void
+perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len) { }
+static inline void
+perf_output_sample(struct perf_output_handle *handle,
+ struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter) { }
+static inline void
+perf_prepare_sample(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter,
+ struct pt_regs *regs) { }
#endif
+#define perf_output_put(handle, x) \
+ perf_output_copy((handle), &(x), sizeof(x))
+
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */
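
For records that are not samples, the handle API composes directly with the
perf_output_put() helper defined above. A sketch, assuming a caller that
already holds a counter and a hypothetical fixed-size payload:

	static void emit_sketch(struct perf_counter *counter)
	{
		struct perf_output_handle handle;
		u64 payload = 0;	/* hypothetical payload */

		if (perf_output_begin(&handle, counter, sizeof(payload),
				      /* nmi */ 0, /* sample */ 0))
			return;

		perf_output_put(&handle, payload); /* expands to perf_output_copy() */
		perf_output_end(&handle);
	}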
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
new file mode 100644
index 000000000000..ea6d579261ad
--- /dev/null
+++ b/include/trace/events/power.h
@@ -0,0 +1,81 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#ifndef _TRACE_POWER_ENUM_
+#define _TRACE_POWER_ENUM_
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+#endif
+
+
+
+TRACE_EVENT(power_start,
+
+ TP_PROTO(unsigned int type, unsigned int state),
+
+ TP_ARGS(type, state),
+
+ TP_STRUCT__entry(
+ __field( u64, type )
+ __field( u64, state )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->state = state;
+ ),
+
+ TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
+);
+
+TRACE_EVENT(power_end,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy),
+
+ TP_STRUCT__entry(
+ __field( u64, dummy )
+ ),
+
+ TP_fast_assign(
+ __entry->dummy = 0xffff;
+ ),
+
+ TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
+
+);
+
+
+TRACE_EVENT(power_frequency,
+
+ TP_PROTO(unsigned int type, unsigned int state),
+
+ TP_ARGS(type, state),
+
+ TP_STRUCT__entry(
+ __field( u64, type )
+ __field( u64, state )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->state = state;
+ ),
+
+ TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state)
+);
+
+#endif /* _TRACE_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
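
Each TRACE_EVENT(name, ...) above generates a trace_name() function with the
TP_PROTO signature, so a cpuidle or cpufreq driver fires these as below; the
variable names are illustrative, and the call sites live outside this diff:

	#include <trace/events/power.h>

	trace_power_start(POWER_CSTATE, target_state);	/* entering a C state */
	trace_power_end(0);				/* leaving it again   */
	trace_power_frequency(POWER_PSTATE, new_freq);	/* P-state transition */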
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b48f1ad7c946..4069c43f4187 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -380,6 +380,39 @@ TRACE_EVENT(sched_stat_wait,
);
/*
+ * Tracepoint for accounting runtime (time the task is executing
+ * on a CPU).
+ */
+TRACE_EVENT(sched_stat_runtime,
+
+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+
+ TP_ARGS(tsk, runtime, vruntime),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, runtime )
+ __field( u64, vruntime )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->runtime = runtime;
+ __entry->vruntime = vruntime;
+ )
+ TP_perf_assign(
+ __perf_count(runtime);
+ ),
+
+ TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->runtime,
+ (unsigned long long)__entry->vruntime)
+);
+
+/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
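
The TP_perf_assign()/__perf_count(runtime) hook above makes perf weight each
sched_stat_runtime event by the runtime delta rather than counting
invocations, so a software counter on this tracepoint accumulates nanoseconds
of CPU time. The call site this implies, with illustrative names (in the real
tree it sits in the scheduler's runtime-accounting path, outside this diff):

	trace_sched_stat_runtime(curr_task, delta_exec, curr_vruntime);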