author		Will Deacon <will.deacon@arm.com>		2012-03-06 17:33:17 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-03-12 12:31:38 -0700
commit		14e84b15bcd96521190c5566c440c56553e66fc9
tree		05e631452d56ff54e1e481ced6aec3face9dbe17 /arch/arm
parent		2ae3680cf82320f15bc142cdfa8b5499d2b17edd
ARM: 7354/1: perf: limit sample_period to half max_period in non-sampling mode
commit 5727347180ebc6b4a866fcbe00dcb39cc03acb37 upstream.
On ARM, the PMU does not stop counting after an overflow and therefore
IRQ latency affects the new counter value read by the kernel. This is
significant for non-sampling runs where it is possible for the new value
to overtake the previous one, causing the delta to be out by up to
max_period events.
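To make the arithmetic concrete, here is a standalone user-space sketch (not
kernel code; the 8-bit counter width and all names are invented for the
example) of a wrapped read overtaking the previous value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Toy 8-bit counter standing in for the PMU, which keeps
	 * counting after the overflow IRQ is raised. */
	uint8_t prev_count = 0xf0;	/* last value read by the kernel */
	uint8_t new_count  = 0xf4;	/* read after a long-latency IRQ: the
					 * counter wrapped at 0xff and counted
					 * past prev_count again */

	/* The naive delta sees only 4 events ... */
	printf("delta = %u\n", (unsigned)(uint8_t)(new_count - prev_count));
	/* ... but the counter really advanced by 260 events, so the
	 * result is short by a full counter period (~max_period). */
	return 0;
}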
Commit a737823d ("ARM: 6835/1: perf: ensure overflows aren't missed due
to IRQ latency") attempted to fix this problem by allowing interrupt
handlers to pass an overflow flag to the event update function, causing
the overflow calculation to assume that the counter passed through zero
when going from prev to new. Unfortunately, this doesn't work when
overflow occurs on the perf_task_tick path because we have the flag
cleared and end up computing a large negative delta.
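That failure mode can be reproduced outside the kernel with a simplified copy
of the old update arithmetic (standalone sketch; old_delta mirrors the branch
removed by the patch below, the other names are invented):

#include <stdint.h>
#include <stdio.h>

/* Simplified version of the old armpmu_event_update() arithmetic,
 * taking the caller-supplied overflow flag. */
static uint64_t old_delta(uint64_t prev, uint64_t new_count,
			  uint64_t max_period, int overflow)
{
	if (overflow)
		return max_period - prev + new_count + 1;
	return new_count - prev;	/* wrong if the counter wrapped */
}

int main(void)
{
	uint64_t max_period = 0xffffffff;	/* 32-bit counter mask */

	/* perf_task_tick path: the counter wrapped, but the flag is 0. */
	uint64_t delta = old_delta(0xfffffff0, 0x10, max_period, 0);
	printf("delta = %lld\n", (long long)delta);	/* -4294967264 */
	return 0;
}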
This patch removes the overflow flag from armpmu_event_update and
instead limits the sample_period to half of the max_period for
non-sampling profiling runs.
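The effect of the new computation can be checked with the same numbers
(standalone sketch; the masked subtraction is the one line the patch adds to
armpmu_event_update(), everything else is scaffolding for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = 0xffffffff;	/* 32-bit counter mask */
	uint64_t prev_count = 0xfffffff0;	/* same wrapped read as above */
	uint64_t new_count  = 0x10;

	/* delta = (new_raw_count - prev_raw_count) & armpmu->max_period; */
	uint64_t delta = (new_count - prev_count) & max_period;
	printf("delta = %llu\n", (unsigned long long)delta);	/* 32 */

	/*
	 * The masked subtraction is correct whenever fewer than max_period
	 * events elapsed between the two reads. Programming at most
	 * max_period/2 events per period leaves the other half as headroom
	 * for IRQ latency, hence sample_period = max_period >> 1.
	 */
	return 0;
}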
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/pmu.h		 2
-rw-r--r--	arch/arm/kernel/perf_event.c		22
-rw-r--r--	arch/arm/kernel/perf_event_v6.c		 2
-rw-r--r--	arch/arm/kernel/perf_event_v7.c		 2
-rw-r--r--	arch/arm/kernel/perf_event_xscale.c	 4
5 files changed, 16 insertions, 16 deletions
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0bda22c094a6..665ef2c2277c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -125,7 +125,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
-			int idx, int overflow);
+			int idx);
 
 int armpmu_event_set_period(struct perf_event *event,
 			    struct hw_perf_event *hwc,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 88b0941ce51e..ecebb893f1e7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -187,7 +187,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -200,13 +200,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -223,7 +217,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -239,7 +233,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -519,7 +513,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	    |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index e63d8115c01b..ccbf9856456f 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -520,7 +520,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ef6d0034b85..a1f9a627a253 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1032,7 +1032,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e0cca10a8411..5d36557bd724 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -257,7 +257,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -594,7 +594,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;