path: root/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>    2009-09-20 15:54:37 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-09-20 15:54:37 -0700
commit    467f9957d9283be40101d7255d06fae7e211ff4c (patch)
tree      71d155ab52b3a78bc88d0c8088b09b3c37f9357a /kernel
parent    78f28b7c555359c67c2a0d23f7436e915329421e (diff)
parent    cdf8073d6b2c6c5a3cd6ce0e6c1297157f7f99ba (diff)
Merge branch 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (58 commits)
  perf_counter: Fix perf_copy_attr() pointer arithmetic
  perf utils: Use a define for the maximum length of a trace event
  perf: Add timechart help text and add timechart to "perf help"
  tracing, x86, cpuidle: Move the end point of a C state in the power tracer
  perf utils: Be consistent about minimum text size in the svghelper
  perf timechart: Add "perf timechart record"
  perf: Add the timechart tool
  perf: Add a SVG helper library file
  tracing, perf: Convert the power tracer into an event tracer
  perf: Add a sample_event type to the event_union
  perf: Allow perf utilities to have "callback" options without arguments
  perf: Store trace event name/id pairs in perf.data
  perf: Add a timestamp to fork events
  sched_clock: Make it NMI safe
  perf_counter: Fix up swcounter throttling
  x86, perf_counter, bts: Optimize BTS overflow handling
  perf sched: Add --input=file option to builtin-sched.c
  perf trace: Sample timestamp and cpu when using record flag
  perf tools: Increase MAX_EVENT_LENGTH
  perf tools: Fix memory leak in read_ftrace_printk()
  ...
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/perf_counter.c           394
-rw-r--r--   kernel/sched_clock.c            122
-rw-r--r--   kernel/sched_fair.c               1
-rw-r--r--   kernel/trace/Makefile             2
-rw-r--r--   kernel/trace/power-traces.c      20
-rw-r--r--   kernel/trace/trace.h              3
-rw-r--r--   kernel/trace/trace_entries.h     17
-rw-r--r--   kernel/trace/trace_power.c      218
8 files changed, 291 insertions, 486 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8cb94a52d1bb..cc768ab81ac8 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2176,6 +2176,13 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
data->nr_pages = nr_pages;
atomic_set(&data->lock, -1);
+ if (counter->attr.watermark) {
+ data->watermark = min_t(long, PAGE_SIZE * nr_pages,
+ counter->attr.wakeup_watermark);
+ }
+ if (!data->watermark)
+ data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
+
rcu_assign_pointer(counter->data, data);
return 0;
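The hunk above introduces the wakeup watermark: when attr.watermark is set, the user-supplied wakeup_watermark is clamped to the buffer size; otherwise it defaults to a quarter of the buffer, but never less than one page. A restatement of that clamping as a standalone helper (kernel context assumed for PAGE_SIZE, min_t() and max(); pick_watermark() itself is illustrative, not a function in the patch):

    static long pick_watermark(int nr_pages, long wakeup_watermark, int use_watermark)
    {
            long wm = 0;

            if (use_watermark)              /* user asked for a byte-count wakeup */
                    wm = min_t(long, PAGE_SIZE * nr_pages, wakeup_watermark);
            if (!wm)                        /* default: 1/4 of the buffer, at least one page */
                    wm = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
            return wm;
    }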
@@ -2315,7 +2322,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
lock_limit >>= PAGE_SHIFT;
locked = vma->vm_mm->locked_vm + extra;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
+ !capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
@@ -2504,35 +2512,15 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
/*
* Output
*/
-
-struct perf_output_handle {
- struct perf_counter *counter;
- struct perf_mmap_data *data;
- unsigned long head;
- unsigned long offset;
- int nmi;
- int sample;
- int locked;
- unsigned long flags;
-};
-
-static bool perf_output_space(struct perf_mmap_data *data,
- unsigned int offset, unsigned int head)
+static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
+ unsigned long offset, unsigned long head)
{
- unsigned long tail;
unsigned long mask;
if (!data->writable)
return true;
mask = (data->nr_pages << PAGE_SHIFT) - 1;
- /*
- * Userspace could choose to issue a mb() before updating the tail
- * pointer. So that all reads will be completed before the write is
- * issued.
- */
- tail = ACCESS_ONCE(data->user_page->data_tail);
- smp_rmb();
offset = (offset - tail) & mask;
head = (head - tail) & mask;
@@ -2633,8 +2621,8 @@ out:
local_irq_restore(handle->flags);
}
-static void perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len)
+void perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len)
{
unsigned int pages_mask;
unsigned int offset;
@@ -2669,16 +2657,13 @@ static void perf_output_copy(struct perf_output_handle *handle,
WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}
-#define perf_output_put(handle, x) \
- perf_output_copy((handle), &(x), sizeof(x))
-
-static int perf_output_begin(struct perf_output_handle *handle,
- struct perf_counter *counter, unsigned int size,
- int nmi, int sample)
+int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_counter *counter, unsigned int size,
+ int nmi, int sample)
{
struct perf_counter *output_counter;
struct perf_mmap_data *data;
- unsigned int offset, head;
+ unsigned long tail, offset, head;
int have_lost;
struct {
struct perf_event_header header;
@@ -2716,16 +2701,23 @@ static int perf_output_begin(struct perf_output_handle *handle,
perf_output_lock(handle);
do {
+ /*
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
+ */
+ tail = ACCESS_ONCE(data->user_page->data_tail);
+ smp_rmb();
offset = head = atomic_long_read(&data->head);
head += size;
- if (unlikely(!perf_output_space(data, offset, head)))
+ if (unlikely(!perf_output_space(data, tail, offset, head)))
goto fail;
} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
handle->offset = offset;
handle->head = head;
- if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+ if (head - tail > data->watermark)
atomic_set(&data->wakeup, 1);
if (have_lost) {
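The reservation loop above is the core of the lockless output path: on every retry the writer re-reads the consumer's data_tail (ACCESS_ONCE() plus smp_rmb(), pairing with the barrier user space is expected to issue before publishing a new tail), computes the proposed head, verifies the record fits between tail and head, and claims the region with a cmpxchg on data->head. The reader wakeup is now driven by the watermark rather than by page crossings. A condensed restatement of that claim path, wrapped in an illustrative helper (reserve_record() is not a function in the patch):

    static int reserve_record(struct perf_mmap_data *data, unsigned int size,
                              unsigned long *out_offset)
    {
            unsigned long tail, offset, head;

            do {
                    /* re-read the consumer position on every retry */
                    tail = ACCESS_ONCE(data->user_page->data_tail);
                    smp_rmb();

                    offset = head = atomic_long_read(&data->head);
                    head += size;

                    if (!perf_output_space(data, tail, offset, head))
                            return -ENOSPC;         /* reader has not caught up */
            } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

            if (head - tail > data->watermark)      /* enough pending: wake the reader */
                    atomic_set(&data->wakeup, 1);

            *out_offset = offset;
            return 0;
    }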
@@ -2749,7 +2741,7 @@ out:
return -ENOSPC;
}
-static void perf_output_end(struct perf_output_handle *handle)
+void perf_output_end(struct perf_output_handle *handle)
{
struct perf_counter *counter = handle->counter;
struct perf_mmap_data *data = handle->data;
@@ -2863,156 +2855,176 @@ static void perf_output_read(struct perf_output_handle *handle,
perf_output_read_one(handle, counter);
}
-void perf_counter_output(struct perf_counter *counter, int nmi,
- struct perf_sample_data *data)
+void perf_output_sample(struct perf_output_handle *handle,
+ struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter)
{
- int ret;
- u64 sample_type = counter->attr.sample_type;
- struct perf_output_handle handle;
- struct perf_event_header header;
- u64 ip;
- struct {
- u32 pid, tid;
- } tid_entry;
- struct perf_callchain_entry *callchain = NULL;
- int callchain_size = 0;
- u64 time;
- struct {
- u32 cpu, reserved;
- } cpu_entry;
+ u64 sample_type = data->type;
- header.type = PERF_EVENT_SAMPLE;
- header.size = sizeof(header);
+ perf_output_put(handle, *header);
- header.misc = 0;
- header.misc |= perf_misc_flags(data->regs);
-
- if (sample_type & PERF_SAMPLE_IP) {
- ip = perf_instruction_pointer(data->regs);
- header.size += sizeof(ip);
- }
-
- if (sample_type & PERF_SAMPLE_TID) {
- /* namespace issues */
- tid_entry.pid = perf_counter_pid(counter, current);
- tid_entry.tid = perf_counter_tid(counter, current);
-
- header.size += sizeof(tid_entry);
- }
+ if (sample_type & PERF_SAMPLE_IP)
+ perf_output_put(handle, data->ip);
- if (sample_type & PERF_SAMPLE_TIME) {
- /*
- * Maybe do better on x86 and provide cpu_clock_nmi()
- */
- time = sched_clock();
+ if (sample_type & PERF_SAMPLE_TID)
+ perf_output_put(handle, data->tid_entry);
- header.size += sizeof(u64);
- }
+ if (sample_type & PERF_SAMPLE_TIME)
+ perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
- header.size += sizeof(u64);
+ perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
- header.size += sizeof(u64);
+ perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
- header.size += sizeof(u64);
-
- if (sample_type & PERF_SAMPLE_CPU) {
- header.size += sizeof(cpu_entry);
+ perf_output_put(handle, data->stream_id);
- cpu_entry.cpu = raw_smp_processor_id();
- cpu_entry.reserved = 0;
- }
+ if (sample_type & PERF_SAMPLE_CPU)
+ perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
- header.size += sizeof(u64);
+ perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
- header.size += perf_counter_read_size(counter);
+ perf_output_read(handle, counter);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
- callchain = perf_callchain(data->regs);
+ if (data->callchain) {
+ int size = 1;
- if (callchain) {
- callchain_size = (1 + callchain->nr) * sizeof(u64);
- header.size += callchain_size;
- } else
- header.size += sizeof(u64);
+ if (data->callchain)
+ size += data->callchain->nr;
+
+ size *= sizeof(u64);
+
+ perf_output_copy(handle, data->callchain, size);
+ } else {
+ u64 nr = 0;
+ perf_output_put(handle, nr);
+ }
}
if (sample_type & PERF_SAMPLE_RAW) {
- int size = sizeof(u32);
+ if (data->raw) {
+ perf_output_put(handle, data->raw->size);
+ perf_output_copy(handle, data->raw->data,
+ data->raw->size);
+ } else {
+ struct {
+ u32 size;
+ u32 data;
+ } raw = {
+ .size = sizeof(u32),
+ .data = 0,
+ };
+ perf_output_put(handle, raw);
+ }
+ }
+}
- if (data->raw)
- size += data->raw->size;
- else
- size += sizeof(u32);
+void perf_prepare_sample(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_counter *counter,
+ struct pt_regs *regs)
+{
+ u64 sample_type = counter->attr.sample_type;
- WARN_ON_ONCE(size & (sizeof(u64)-1));
- header.size += size;
- }
+ data->type = sample_type;
- ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
- if (ret)
- return;
+ header->type = PERF_EVENT_SAMPLE;
+ header->size = sizeof(*header);
- perf_output_put(&handle, header);
+ header->misc = 0;
+ header->misc |= perf_misc_flags(regs);
- if (sample_type & PERF_SAMPLE_IP)
- perf_output_put(&handle, ip);
+ if (sample_type & PERF_SAMPLE_IP) {
+ data->ip = perf_instruction_pointer(regs);
- if (sample_type & PERF_SAMPLE_TID)
- perf_output_put(&handle, tid_entry);
+ header->size += sizeof(data->ip);
+ }
- if (sample_type & PERF_SAMPLE_TIME)
- perf_output_put(&handle, time);
+ if (sample_type & PERF_SAMPLE_TID) {
+ /* namespace issues */
+ data->tid_entry.pid = perf_counter_pid(counter, current);
+ data->tid_entry.tid = perf_counter_tid(counter, current);
+
+ header->size += sizeof(data->tid_entry);
+ }
+
+ if (sample_type & PERF_SAMPLE_TIME) {
+ data->time = perf_clock();
+
+ header->size += sizeof(data->time);
+ }
if (sample_type & PERF_SAMPLE_ADDR)
- perf_output_put(&handle, data->addr);
+ header->size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_ID) {
- u64 id = primary_counter_id(counter);
+ data->id = primary_counter_id(counter);
- perf_output_put(&handle, id);
+ header->size += sizeof(data->id);
}
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- perf_output_put(&handle, counter->id);
+ if (sample_type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = counter->id;
- if (sample_type & PERF_SAMPLE_CPU)
- perf_output_put(&handle, cpu_entry);
+ header->size += sizeof(data->stream_id);
+ }
+
+ if (sample_type & PERF_SAMPLE_CPU) {
+ data->cpu_entry.cpu = raw_smp_processor_id();
+ data->cpu_entry.reserved = 0;
+
+ header->size += sizeof(data->cpu_entry);
+ }
if (sample_type & PERF_SAMPLE_PERIOD)
- perf_output_put(&handle, data->period);
+ header->size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_READ)
- perf_output_read(&handle, counter);
+ header->size += perf_counter_read_size(counter);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
- if (callchain)
- perf_output_copy(&handle, callchain, callchain_size);
- else {
- u64 nr = 0;
- perf_output_put(&handle, nr);
- }
+ int size = 1;
+
+ data->callchain = perf_callchain(regs);
+
+ if (data->callchain)
+ size += data->callchain->nr;
+
+ header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
- if (data->raw) {
- perf_output_put(&handle, data->raw->size);
- perf_output_copy(&handle, data->raw->data, data->raw->size);
- } else {
- struct {
- u32 size;
- u32 data;
- } raw = {
- .size = sizeof(u32),
- .data = 0,
- };
- perf_output_put(&handle, raw);
- }
+ int size = sizeof(u32);
+
+ if (data->raw)
+ size += data->raw->size;
+ else
+ size += sizeof(u32);
+
+ WARN_ON_ONCE(size & (sizeof(u64)-1));
+ header->size += size;
}
+}
+
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+
+ perf_prepare_sample(&header, data, counter, regs);
+
+ if (perf_output_begin(&handle, counter, header.size, nmi, 1))
+ return;
+
+ perf_output_sample(&handle, &header, data, counter);
perf_output_end(&handle);
}
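perf_counter_output() is now a thin wrapper around the new two-step API: perf_prepare_sample() computes the record size and gathers the sample fields into struct perf_sample_data, and perf_output_sample() writes them through an output handle. Code outside this file (for example an arch PMU overflow handler) can follow the same sequence; a sketch modelled on the wrapper above, with error handling trimmed:

    static void emit_sample(struct perf_counter *counter, int nmi,
                            struct perf_sample_data *data, struct pt_regs *regs)
    {
            struct perf_output_handle handle;
            struct perf_event_header header;

            perf_prepare_sample(&header, data, counter, regs); /* size + gather fields */

            if (perf_output_begin(&handle, counter, header.size, nmi, 1))
                    return;                                    /* no room in the buffer */

            perf_output_sample(&handle, &header, data, counter);
            perf_output_end(&handle);
    }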
@@ -3071,6 +3083,7 @@ struct perf_task_event {
u32 ppid;
u32 tid;
u32 ptid;
+ u64 time;
} event;
};
@@ -3078,9 +3091,12 @@ static void perf_counter_task_output(struct perf_counter *counter,
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
- int size = task_event->event.header.size;
+ int size;
struct task_struct *task = task_event->task;
- int ret = perf_output_begin(&handle, counter, size, 0, 0);
+ int ret;
+
+ size = task_event->event.header.size;
+ ret = perf_output_begin(&handle, counter, size, 0, 0);
if (ret)
return;
@@ -3091,7 +3107,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
task_event->event.tid = perf_counter_tid(counter, task);
task_event->event.ptid = perf_counter_tid(counter, current);
+ task_event->event.time = perf_clock();
+
perf_output_put(&handle, task_event->event);
+
perf_output_end(&handle);
}
@@ -3473,7 +3492,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
.misc = 0,
.size = sizeof(throttle_event),
},
- .time = sched_clock(),
+ .time = perf_clock(),
.id = primary_counter_id(counter),
.stream_id = counter->id,
};
@@ -3493,14 +3512,16 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
* Generic counter overflow handling, sampling.
*/
-int perf_counter_overflow(struct perf_counter *counter, int nmi,
- struct perf_sample_data *data)
+static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
+ int throttle, struct perf_sample_data *data,
+ struct pt_regs *regs)
{
int events = atomic_read(&counter->event_limit);
- int throttle = counter->pmu->unthrottle != NULL;
struct hw_perf_counter *hwc = &counter->hw;
int ret = 0;
+ throttle = (throttle && counter->pmu->unthrottle != NULL);
+
if (!throttle) {
hwc->interrupts++;
} else {
@@ -3523,7 +3544,7 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
}
if (counter->attr.freq) {
- u64 now = sched_clock();
+ u64 now = perf_clock();
s64 delta = now - hwc->freq_stamp;
hwc->freq_stamp = now;
@@ -3549,10 +3570,17 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
perf_counter_disable(counter);
}
- perf_counter_output(counter, nmi, data);
+ perf_counter_output(counter, nmi, data, regs);
return ret;
}
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ return __perf_counter_overflow(counter, nmi, 1, data, regs);
+}
+
/*
* Generic software counter infrastructure
*/
@@ -3588,9 +3616,11 @@ again:
}
static void perf_swcounter_overflow(struct perf_counter *counter,
- int nmi, struct perf_sample_data *data)
+ int nmi, struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct hw_perf_counter *hwc = &counter->hw;
+ int throttle = 0;
u64 overflow;
data->period = counter->hw.last_period;
@@ -3600,13 +3630,15 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
return;
for (; overflow; overflow--) {
- if (perf_counter_overflow(counter, nmi, data)) {
+ if (__perf_counter_overflow(counter, nmi, throttle,
+ data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
+ throttle = 1;
}
}
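The loop change above is part of the swcounter throttling fix from the shortlog: when a software counter has several overflows pending, the first call into __perf_counter_overflow() runs with throttling disabled, and only the remaining overflows in the same batch are subject to it. The pattern in isolation, as a generic sketch rather than the kernel function:

    static void drain_overflows(u64 pending, int (*emit)(int throttle, void *ctx),
                                void *ctx)
    {
            int throttle = 0;

            for (; pending; pending--) {
                    if (emit(throttle, ctx))
                            break;          /* handler hit its interrupt limit */
                    throttle = 1;           /* later overflows may be throttled */
            }
    }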
@@ -3618,7 +3650,8 @@ static void perf_swcounter_unthrottle(struct perf_counter *counter)
}
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
- int nmi, struct perf_sample_data *data)
+ int nmi, struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct hw_perf_counter *hwc = &counter->hw;
@@ -3627,11 +3660,11 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
if (!hwc->sample_period)
return;
- if (!data->regs)
+ if (!regs)
return;
if (!atomic64_add_negative(nr, &hwc->period_left))
- perf_swcounter_overflow(counter, nmi, data);
+ perf_swcounter_overflow(counter, nmi, data, regs);
}
static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3690,7 +3723,8 @@ static int perf_swcounter_match(struct perf_counter *counter,
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
enum perf_type_id type,
u32 event, u64 nr, int nmi,
- struct perf_sample_data *data)
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct perf_counter *counter;
@@ -3699,8 +3733,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
rcu_read_lock();
list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_swcounter_match(counter, type, event, data->regs))
- perf_swcounter_add(counter, nr, nmi, data);
+ if (perf_swcounter_match(counter, type, event, regs))
+ perf_swcounter_add(counter, nr, nmi, data, regs);
}
rcu_read_unlock();
}
@@ -3721,7 +3755,8 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
u64 nr, int nmi,
- struct perf_sample_data *data)
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
int *recursion = perf_swcounter_recursion_context(cpuctx);
@@ -3734,7 +3769,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
barrier();
perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
- nr, nmi, data);
+ nr, nmi, data, regs);
rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
@@ -3742,7 +3777,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
*/
ctx = rcu_dereference(current->perf_counter_ctxp);
if (ctx)
- perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
+ perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs);
rcu_read_unlock();
barrier();
@@ -3756,11 +3791,11 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
- .regs = regs,
.addr = addr,
};
- do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
+ do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi,
+ &data, regs);
}
static void perf_swcounter_read(struct perf_counter *counter)
@@ -3797,6 +3832,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
+ struct pt_regs *regs;
struct perf_counter *counter;
u64 period;
@@ -3804,17 +3840,17 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
counter->pmu->read(counter);
data.addr = 0;
- data.regs = get_irq_regs();
+ regs = get_irq_regs();
/*
* In case we exclude kernel IPs or are somehow not in interrupt
* context, provide the next best thing, the user IP.
*/
- if ((counter->attr.exclude_kernel || !data.regs) &&
+ if ((counter->attr.exclude_kernel || !regs) &&
!counter->attr.exclude_user)
- data.regs = task_pt_regs(current);
+ regs = task_pt_regs(current);
- if (data.regs) {
- if (perf_counter_overflow(counter, 0, &data))
+ if (regs) {
+ if (perf_counter_overflow(counter, 0, &data, regs))
ret = HRTIMER_NORESTART;
}
@@ -3950,15 +3986,17 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
};
struct perf_sample_data data = {
- .regs = get_irq_regs(),
.addr = addr,
.raw = &raw,
};
- if (!data.regs)
- data.regs = task_pt_regs(current);
+ struct pt_regs *regs = get_irq_regs();
+
+ if (!regs)
+ regs = task_pt_regs(current);
- do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
+ do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+ &data, regs);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);
@@ -4170,8 +4208,8 @@ done:
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
struct perf_counter_attr *attr)
{
- int ret;
u32 size;
+ int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
@@ -4196,19 +4234,19 @@ static int perf_copy_attr(struct perf_counter_attr __user *uattr,
/*
* If we're handed a bigger struct than we know of,
- * ensure all the unknown bits are 0.
+ * ensure all the unknown bits are 0 - i.e. new
+ * user-space does not rely on any kernel feature
+ * extensions we dont know about yet.
*/
if (size > sizeof(*attr)) {
- unsigned long val;
- unsigned long __user *addr;
- unsigned long __user *end;
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
- addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
- sizeof(unsigned long));
- end = PTR_ALIGN((void __user *)uattr + size,
- sizeof(unsigned long));
+ addr = (void __user *)uattr + sizeof(*attr);
+ end = (void __user *)uattr + size;
- for (; addr < end; addr += sizeof(unsigned long)) {
+ for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
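This perf_copy_attr() hunk is the "Fix perf_copy_attr() pointer arithmetic" commit from the shortlog. The old loop declared addr as unsigned long __user * and then did addr += sizeof(unsigned long); C pointer arithmetic scales that by the element size again, so the loop strode sizeof(unsigned long) squared bytes at a time and skipped most of the tail it was supposed to verify. The replacement walks the unknown tail one byte at a time and rejects the attr if any byte past the structure the kernel knows about is non-zero. The check in isolation (simplified; the real code folds this into perf_copy_attr() and uses its error path):

    static int check_unknown_tail(void __user *uattr, size_t known, size_t size)
    {
            unsigned char __user *addr = (unsigned char __user *)uattr + known;
            unsigned char __user *end  = (unsigned char __user *)uattr + size;
            unsigned char val;
            int ret;

            for (; addr < end; addr++) {            /* byte-wise: no scaling surprises */
                    ret = get_user(val, addr);
                    if (ret)
                            return ret;
                    if (val)
                            return -E2BIG;          /* unknown feature bits are set */
            }
            return 0;
    }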
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e1d16c9a7680..ac2e1dc708bd 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -48,13 +48,6 @@ static __read_mostly int sched_clock_running;
__read_mostly int sched_clock_stable;
struct sched_clock_data {
- /*
- * Raw spinlock - this is a special case: this might be called
- * from within instrumentation code so we dont want to do any
- * instrumentation ourselves.
- */
- raw_spinlock_t lock;
-
u64 tick_raw;
u64 tick_gtod;
u64 clock;
@@ -80,7 +73,6 @@ void sched_clock_init(void)
for_each_possible_cpu(cpu) {
struct sched_clock_data *scd = cpu_sdc(cpu);
- scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
scd->tick_raw = 0;
scd->tick_gtod = ktime_now;
scd->clock = ktime_now;
@@ -109,14 +101,19 @@ static inline u64 wrap_max(u64 x, u64 y)
* - filter out backward motion
* - use the GTOD tick value to create a window to filter crazy TSC values
*/
-static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
+static u64 sched_clock_local(struct sched_clock_data *scd)
{
- s64 delta = now - scd->tick_raw;
- u64 clock, min_clock, max_clock;
+ u64 now, clock, old_clock, min_clock, max_clock;
+ s64 delta;
+again:
+ now = sched_clock();
+ delta = now - scd->tick_raw;
if (unlikely(delta < 0))
delta = 0;
+ old_clock = scd->clock;
+
/*
* scd->clock = clamp(scd->tick_gtod + delta,
* max(scd->tick_gtod, scd->clock),
@@ -124,84 +121,73 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
*/
clock = scd->tick_gtod + delta;
- min_clock = wrap_max(scd->tick_gtod, scd->clock);
- max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+ min_clock = wrap_max(scd->tick_gtod, old_clock);
+ max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
- scd->clock = clock;
+ if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
+ goto again;
- return scd->clock;
+ return clock;
}
-static void lock_double_clock(struct sched_clock_data *data1,
- struct sched_clock_data *data2)
+static u64 sched_clock_remote(struct sched_clock_data *scd)
{
- if (data1 < data2) {
- __raw_spin_lock(&data1->lock);
- __raw_spin_lock(&data2->lock);
+ struct sched_clock_data *my_scd = this_scd();
+ u64 this_clock, remote_clock;
+ u64 *ptr, old_val, val;
+
+ sched_clock_local(my_scd);
+again:
+ this_clock = my_scd->clock;
+ remote_clock = scd->clock;
+
+ /*
+ * Use the opportunity that we have both locks
+ * taken to couple the two clocks: we take the
+ * larger time as the latest time for both
+ * runqueues. (this creates monotonic movement)
+ */
+ if (likely((s64)(remote_clock - this_clock) < 0)) {
+ ptr = &scd->clock;
+ old_val = remote_clock;
+ val = this_clock;
} else {
- __raw_spin_lock(&data2->lock);
- __raw_spin_lock(&data1->lock);
+ /*
+ * Should be rare, but possible:
+ */
+ ptr = &my_scd->clock;
+ old_val = this_clock;
+ val = remote_clock;
}
+
+ if (cmpxchg(ptr, old_val, val) != old_val)
+ goto again;
+
+ return val;
}
u64 sched_clock_cpu(int cpu)
{
- u64 now, clock, this_clock, remote_clock;
struct sched_clock_data *scd;
+ u64 clock;
+
+ WARN_ON_ONCE(!irqs_disabled());
if (sched_clock_stable)
return sched_clock();
- scd = cpu_sdc(cpu);
-
- /*
- * Normally this is not called in NMI context - but if it is,
- * trying to do any locking here is totally lethal.
- */
- if (unlikely(in_nmi()))
- return scd->clock;
-
if (unlikely(!sched_clock_running))
return 0ull;
- WARN_ON_ONCE(!irqs_disabled());
- now = sched_clock();
-
- if (cpu != raw_smp_processor_id()) {
- struct sched_clock_data *my_scd = this_scd();
-
- lock_double_clock(scd, my_scd);
-
- this_clock = __update_sched_clock(my_scd, now);
- remote_clock = scd->clock;
-
- /*
- * Use the opportunity that we have both locks
- * taken to couple the two clocks: we take the
- * larger time as the latest time for both
- * runqueues. (this creates monotonic movement)
- */
- if (likely((s64)(remote_clock - this_clock) < 0)) {
- clock = this_clock;
- scd->clock = clock;
- } else {
- /*
- * Should be rare, but possible:
- */
- clock = remote_clock;
- my_scd->clock = remote_clock;
- }
-
- __raw_spin_unlock(&my_scd->lock);
- } else {
- __raw_spin_lock(&scd->lock);
- clock = __update_sched_clock(scd, now);
- }
+ scd = cpu_sdc(cpu);
- __raw_spin_unlock(&scd->lock);
+ if (cpu != smp_processor_id())
+ clock = sched_clock_remote(scd);
+ else
+ clock = sched_clock_local(scd);
return clock;
}
@@ -223,11 +209,9 @@ void sched_clock_tick(void)
now_gtod = ktime_to_ns(ktime_get());
now = sched_clock();
- __raw_spin_lock(&scd->lock);
scd->tick_raw = now;
scd->tick_gtod = now_gtod;
- __update_sched_clock(scd, now);
- __raw_spin_unlock(&scd->lock);
+ sched_clock_local(scd);
}
/*
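This is the "sched_clock: Make it NMI safe" commit: the per-CPU raw spinlock is gone, so sched_clock_cpu() can be called from NMI context (which perf now relies on for timestamps) without deadlocking on a lock the interrupted code already holds. Both the local update and the cross-CPU coupling publish their result with a cmpxchg and retry if another updater won the race. The local path again, restated with comments (this mirrors sched_clock_local() above, it is not new code):

    static u64 clock_local(struct sched_clock_data *scd)
    {
            u64 now, clock, old_clock, min_clock, max_clock;
            s64 delta;

    again:
            now = sched_clock();                    /* raw, possibly unstable clock */
            delta = now - scd->tick_raw;
            if (unlikely(delta < 0))
                    delta = 0;

            old_clock = scd->clock;

            /* clamp tick_gtod + delta into a window that never goes backwards
             * and never runs more than one tick ahead of GTOD */
            clock = scd->tick_gtod + delta;
            min_clock = wrap_max(scd->tick_gtod, old_clock);
            max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

            clock = wrap_max(clock, min_clock);
            clock = wrap_min(clock, max_clock);

            /* publish lock-free; if an NMI or another CPU raced us, redo */
            if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
                    goto again;

            return clock;
    }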
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 10d218ab69f2..990b188803ce 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -513,6 +513,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
+ trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cpuacct_charge(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 844164dca90a..26f03ac07c2b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
-obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
@@ -54,5 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_EVENT_TRACING) += power-traces.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
new file mode 100644
index 000000000000..e06c6e3d56a3
--- /dev/null
+++ b/kernel/trace/power-traces.c
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Arjan van de Ven <arjan@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
+
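power-traces.c follows the standard one-definition pattern for trace events: every consumer includes <trace/events/power.h> normally and calls the generated trace_*() stubs, while exactly one translation unit defines CREATE_TRACE_POINTS before the include so the declarations expand into real tracepoint definitions, which are then exported for modules. The same pattern with a hypothetical event (widget_spin and its fields are illustrative, not the contents of power.h):

    /* include/trace/events/widget.h -- hypothetical event header */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM widget

    #if !defined(_TRACE_WIDGET_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_WIDGET_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(widget_spin,
            TP_PROTO(unsigned int id, u64 rate),
            TP_ARGS(id, rate),
            TP_STRUCT__entry(
                    __field(unsigned int,   id)
                    __field(u64,            rate)
            ),
            TP_fast_assign(
                    __entry->id   = id;
                    __entry->rate = rate;
            ),
            TP_printk("id=%u rate=%llu", __entry->id,
                      (unsigned long long)__entry->rate)
    );

    #endif /* _TRACE_WIDGET_H */
    #include <trace/define_trace.h>

    /* widget-traces.c -- the one file that instantiates the tracepoints */
    #define CREATE_TRACE_POINTS
    #include <trace/events/widget.h>
    EXPORT_TRACEPOINT_SYMBOL_GPL(widget_spin);

    /* anywhere else: include the header normally and fire the event, e.g.
     *     trace_widget_spin(id, rate);                                    */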
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 86bcff94791a..405cb850b75d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,7 +11,6 @@
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
-#include <trace/power.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
@@ -37,7 +36,6 @@ enum trace_type {
TRACE_HW_BRANCHES,
TRACE_KMEM_ALLOC,
TRACE_KMEM_FREE,
- TRACE_POWER,
TRACE_BLK,
__TRACE_LAST_TYPE,
@@ -207,7 +205,6 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
- IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
TRACE_KMEM_ALLOC); \
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index a431748ddd6e..ead3d724599d 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -330,23 +330,6 @@ FTRACE_ENTRY(hw_branch, hw_branch_entry,
F_printk("from: %llx to: %llx", __entry->from, __entry->to)
);
-FTRACE_ENTRY(power, trace_power,
-
- TRACE_POWER,
-
- F_STRUCT(
- __field_struct( struct power_trace, state_data )
- __field_desc( s64, state_data, stamp )
- __field_desc( s64, state_data, end )
- __field_desc( int, state_data, type )
- __field_desc( int, state_data, state )
- ),
-
- F_printk("%llx->%llx type:%u state:%u",
- __entry->stamp, __entry->end,
- __entry->type, __entry->state)
-);
-
FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
TRACE_KMEM_ALLOC,
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
deleted file mode 100644
index fe1a00f1445a..000000000000
--- a/kernel/trace/trace_power.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * ring buffer based C-state tracer
- *
- * Arjan van de Ven <arjan@linux.intel.com>
- * Copyright (C) 2008 Intel Corporation
- *
- * Much is borrowed from trace_boot.c which is
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <trace/power.h>
-#include <linux/kallsyms.h>
-#include <linux/module.h>
-
-#include "trace.h"
-#include "trace_output.h"
-
-static struct trace_array *power_trace;
-static int __read_mostly trace_power_enabled;
-
-static void probe_power_start(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- if (!trace_power_enabled)
- return;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
-}
-
-
-static void probe_power_end(struct power_trace *it)
-{
- struct ftrace_event_call *call = &event_power;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- buffer = tr->buffer;
-
- preempt_disable();
- it->end = ktime_get();
- data = tr->data[smp_processor_id()];
-
- event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
- sizeof(*entry), 0, 0);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->state_data = *it;
- if (!filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
- preempt_enable();
-}
-
-static void probe_power_mark(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- struct ftrace_event_call *call = &event_power;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- buffer = tr->buffer;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
- preempt_disable();
- it->end = it->stamp;
- data = tr->data[smp_processor_id()];
-
- event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
- sizeof(*entry), 0, 0);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->state_data = *it;
- if (!filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
- preempt_enable();
-}
-
-static int tracing_power_register(void)
-{
- int ret;
-
- ret = register_trace_power_start(probe_power_start);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_start\n");
- return ret;
- }
- ret = register_trace_power_end(probe_power_end);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_end\n");
- goto fail_start;
- }
- ret = register_trace_power_mark(probe_power_mark);
- if (ret) {
- pr_info("power trace: Couldn't activate tracepoint"
- " probe to trace_power_mark\n");
- goto fail_end;
- }
- return ret;
-fail_end:
- unregister_trace_power_end(probe_power_end);
-fail_start:
- unregister_trace_power_start(probe_power_start);
- return ret;
-}
-
-static void start_power_trace(struct trace_array *tr)
-{
- trace_power_enabled = 1;
-}
-
-static void stop_power_trace(struct trace_array *tr)
-{
- trace_power_enabled = 0;
-}
-
-static void power_trace_reset(struct trace_array *tr)
-{
- trace_power_enabled = 0;
- unregister_trace_power_start(probe_power_start);
- unregister_trace_power_end(probe_power_end);
- unregister_trace_power_mark(probe_power_mark);
-}
-
-
-static int power_trace_init(struct trace_array *tr)
-{
- power_trace = tr;
-
- trace_power_enabled = 1;
- tracing_power_register();
-
- tracing_reset_online_cpus(tr);
- return 0;
-}
-
-static enum print_line_t power_print_line(struct trace_iterator *iter)
-{
- int ret = 0;
- struct trace_entry *entry = iter->ent;
- struct trace_power *field ;
- struct power_trace *it;
- struct trace_seq *s = &iter->seq;
- struct timespec stamp;
- struct timespec duration;
-
- trace_assign_type(field, entry);
- it = &field->state_data;
- stamp = ktime_to_timespec(it->stamp);
- duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
-
- if (entry->type == TRACE_POWER) {
- if (it->type == POWER_CSTATE)
- ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
- stamp.tv_sec,
- stamp.tv_nsec,
- it->state, iter->cpu,
- duration.tv_sec,
- duration.tv_nsec);
- if (it->type == POWER_PSTATE)
- ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
- stamp.tv_sec,
- stamp.tv_nsec,
- it->state, iter->cpu);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- return TRACE_TYPE_HANDLED;
- }
- return TRACE_TYPE_UNHANDLED;
-}
-
-static void power_print_header(struct seq_file *s)
-{
- seq_puts(s, "# TIMESTAMP STATE EVENT\n");
- seq_puts(s, "# | | |\n");
-}
-
-static struct tracer power_tracer __read_mostly =
-{
- .name = "power",
- .init = power_trace_init,
- .start = start_power_trace,
- .stop = stop_power_trace,
- .reset = power_trace_reset,
- .print_line = power_print_line,
- .print_header = power_print_header,
-};
-
-static int init_power_trace(void)
-{
- return register_tracer(&power_tracer);
-}
-device_initcall(init_power_trace);