author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-30 19:02:59 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-30 19:02:59 -0700
commit | 55bcab46955644d5a8149a9b3cc9752a336e02f8 (patch) |
tree | a45593603de9265cb0624043ab36f4e0f98823fc /include |
parent | 58580c86450bc09ff101f0d23fd8a162c146bc64 (diff) |
parent | 7bec7a9134c25cecb0d7029199b59f7b1bef35b8 (diff) |
Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (47 commits)
perf report: Add --symbols parameter
perf report: Add --comms parameter
perf report: Add --dsos parameter
perf_counter tools: Adjust only prelinked symbol's addresses
perf_counter: Provide a way to enable counters on exec (see the sketch after this list)
perf_counter tools: Reduce perf stat measurement overhead/skew
perf stat: Use percentages for scaling output
perf_counter, x86: Update x86_pmu after WARN()
perf stat: Micro-optimize the code: memcpy is only required if no event is selected and !null_run
perf stat: Improve output
perf stat: Fix multi-run stats
perf stat: Add -n/--null option to run without counters
perf_counter tools: Remove dead code
perf_counter: Complete counter swap
perf report: Print sorted callchains per histogram entries
perf_counter tools: Prepare a small callchain framework
perf record: Fix unhandled io return value
perf_counter tools: Add alias for 'l1d' and 'l1i'
perf-report: Add bare minimum PERF_EVENT_READ parsing
perf-report: Add modes for inherited stats and no-samples
...
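One of the user-visible changes in this batch is the new `enable_on_exec` attribute bit added by "perf_counter: Provide a way to enable counters on exec" (visible in the header diff below). As a hypothetical illustration of how a tool can use it, here is a minimal self-contained sketch, not part of this merge: the counter is created disabled on a forked child and armed by the kernel at the child's exec, so the fork and handshake overhead is never counted. The syscall number is arch-specific (298 on x86-64 at the time), and the pipe handshake and error handling are simplified assumptions.

```c
/*
 * Hypothetical sketch (not from this merge): count the instructions
 * of an exec'ed child using the new enable_on_exec attribute bit.
 * Assumes the patched <linux/perf_counter.h> is on the include path.
 */
#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

#ifndef __NR_perf_counter_open
#define __NR_perf_counter_open 298	/* x86-64; arch-specific */
#endif

int main(int argc, char **argv)
{
	struct perf_counter_attr attr;
	long long count;
	int fd, go[2];
	pid_t child;
	char c;

	if (argc < 2)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.type           = PERF_TYPE_HARDWARE;
	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled       = 1;	/* created disabled ...       */
	attr.enable_on_exec = 1;	/* ... armed at the next exec */

	pipe(go);
	child = fork();
	if (!child) {
		close(go[1]);
		read(go[0], &c, 1);	/* block until the counter is attached */
		execvp(argv[1], argv + 1);
		_exit(127);
	}

	fd = syscall(__NR_perf_counter_open, &attr, child, -1, -1, 0);
	close(go[0]);
	close(go[1]);			/* EOF releases the child into exec */

	waitpid(child, NULL, 0);
	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
		printf("%lld instructions\n", count);
	return 0;
}
```

This is the same idea behind "perf_counter tools: Reduce perf stat measurement overhead/skew": arming at exec time keeps the measuring tool's own setup out of the counts.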
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/perf_counter.h | 46
1 file changed, 39 insertions, 7 deletions
```diff
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 89698d8aba5c..5e970c7d3fd5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -178,8 +178,10 @@ struct perf_counter_attr {
 				mmap           :  1, /* include mmap data     */
 				comm           :  1, /* include comm data     */
 				freq           :  1, /* use freq, not period  */
+				inherit_stat   :  1, /* per task counts       */
+				enable_on_exec :  1, /* next exec enables     */
 
-				__reserved_1   : 53;
+				__reserved_1   : 51;
 
 	__u32			wakeup_events;	/* wakeup every n events */
 	__u32			__reserved_2;
@@ -232,6 +234,14 @@ struct perf_counter_mmap_page {
 	__u32	lock;			/* seqlock for synchronization */
 	__u32	index;			/* hardware counter identifier */
 	__s64	offset;			/* add to hardware counter value */
+	__u64	time_enabled;		/* time counter active */
+	__u64	time_running;		/* time counter on cpu */
+
+	/*
+	 * Hole for extension of the self monitor capabilities
+	 */
+
+	__u64	__reserved[123];	/* align to 1k */
 
 	/*
 	 * Control data for the mmap() data buffer.
@@ -253,7 +263,6 @@ struct perf_counter_mmap_page {
 #define PERF_EVENT_MISC_KERNEL		(1 << 0)
 #define PERF_EVENT_MISC_USER		(2 << 0)
 #define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
 
 struct perf_event_header {
 	__u32	type;
@@ -327,9 +336,18 @@ enum perf_event_type {
 	PERF_EVENT_FORK			= 7,
 
 	/*
-	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
-	 * will be PERF_SAMPLE_*
-	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
+	 *	u64				value;
+	 *	{ u64		time_enabled;	} && PERF_FORMAT_ENABLED
+	 *	{ u64		time_running;	} && PERF_FORMAT_RUNNING
+	 *	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 * };
+	 */
+	PERF_EVENT_READ			= 8,
+
+	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *
@@ -337,8 +355,9 @@ enum perf_event_type {
 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
-	 *	{ u64			config;   } && PERF_SAMPLE_CONFIG
+	 *	{ u64			id;	  } && PERF_SAMPLE_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
+	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
 	 *	{ u64			nr;
 	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
@@ -347,6 +366,9 @@ enum perf_event_type {
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 * };
 	 */
+	PERF_EVENT_SAMPLE		= 9,
+
+	PERF_EVENT_MAX,			/* non-ABI */
 };
 
 enum perf_callchain_context {
@@ -582,6 +604,7 @@ struct perf_counter_context {
 	int			nr_counters;
 	int			nr_active;
 	int			is_active;
+	int			nr_stat;
 	atomic_t		refcount;
 	struct task_struct	*task;
@@ -669,7 +692,16 @@ static inline int is_software_counter(struct perf_counter *counter)
 		(counter->attr.type != PERF_TYPE_HW_CACHE);
 }
 
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swcounter_enabled[event]))
+		__perf_swcounter_event(event, nr, nmi, regs, addr);
+}
 
 extern void __perf_counter_mmap(struct vm_area_struct *vma);
```
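The `time_enabled`/`time_running` fields added to `struct perf_counter_mmap_page` above expose, to a self-monitoring task, how long its counter was scheduled versus how long it actually sat on the PMU. Here is a hypothetical read sketch, assuming the counter's control page has been mmap()ed; the seqcount retry loop follows the usual userspace pattern for the `lock` field, and the barrier shown is a plain compiler barrier (real SMP code may need stronger ordering):

```c
/*
 * Hypothetical sketch (not from this patch): read the new timekeeping
 * fields from a counter's mmap()ed control page. The kernel bumps
 * 'lock' around updates, so retry if it changed underneath us.
 */
#include <linux/perf_counter.h>
#include <sys/mman.h>
#include <unistd.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

/* map the read-only control page of an open counter fd */
static struct perf_counter_mmap_page *map_counter_page(int fd)
{
	return mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
		    MAP_SHARED, fd, 0);
}

static void read_times(struct perf_counter_mmap_page *pc,
		       unsigned long long *enabled,
		       unsigned long long *running)
{
	unsigned int seq;

	do {
		seq = pc->lock;
		barrier();
		*enabled = pc->time_enabled;	/* time the counter was scheduled in */
		*running = pc->time_running;	/* time it actually held the PMU     */
		barrier();
	} while (pc->lock != seq);
}
```

When counters are multiplexed, `count * enabled / running` gives the scaled estimate; that ratio is what "perf stat: Use percentages for scaling output" reports.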