Diffstat (limited to 'arch/powerpc/perf')
 arch/powerpc/perf/core-book3s.c | 105
 arch/powerpc/perf/e500-pmu.c    |   2
 arch/powerpc/perf/power7-pmu.c  |  93
 3 files changed, 163 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e21f1a..65362e98eb26 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -880,8 +880,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+ /*
+ * This event may have been disabled/stopped in record_and_restart()
+ * because we exceeded the ->event_limit. If re-starting the event,
+ * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
+ * notification is re-enabled.
+ */
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ else
+ event->hw.state = 0;
/*
* If group events scheduling transaction was started,
@@ -1305,6 +1313,16 @@ static int power_pmu_event_idx(struct perf_event *event)
return event->hw.idx;
}
+ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
@@ -1349,6 +1367,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
+ if (delta == 0)
+ left++;
if (period) {
if (left <= 0) {
left += period;
@@ -1412,11 +1432,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
return regs->nip;
}
-static bool pmc_overflow(unsigned long val)
+static bool pmc_overflow_power7(unsigned long val)
{
- if ((int)val < 0)
- return true;
-
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
@@ -1428,7 +1445,15 @@ static bool pmc_overflow(unsigned long val)
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+ if ((0x80000000 - val) <= 256)
+ return true;
+
+ return false;
+}
+
+static bool pmc_overflow(unsigned long val)
+{
+ if ((int)val < 0)
return true;
return false;
@@ -1439,11 +1464,11 @@ static bool pmc_overflow(unsigned long val)
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
- int i;
+ int i, j;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
- unsigned long val;
- int found = 0;
+ unsigned long val[8];
+ int found, active;
int nmi;
if (cpuhw->n_limited)
@@ -1458,33 +1483,53 @@ static void perf_event_interrupt(struct pt_regs *regs)
else
irq_enter();
- for (i = 0; i < cpuhw->n_events; ++i) {
- event = cpuhw->event[i];
- if (!event->hw.idx || is_limited_pmc(event->hw.idx))
+ /* Read all the PMCs since we'll need them a bunch of times */
+ for (i = 0; i < ppmu->n_counter; ++i)
+ val[i] = read_pmc(i + 1);
+
+ /* Try to find what caused the IRQ */
+ found = 0;
+ for (i = 0; i < ppmu->n_counter; ++i) {
+ if (!pmc_overflow(val[i]))
continue;
- val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
- /* event has overflowed */
- found = 1;
- record_and_restart(event, val, regs);
+ if (is_limited_pmc(i + 1))
+ continue; /* these won't generate IRQs */
+ /*
+ * We've found one that's overflowed. For active
+ * counters we need to log this. For inactive
+ * counters, we need to reset it anyway
+ */
+ found = 1;
+ active = 0;
+ for (j = 0; j < cpuhw->n_events; ++j) {
+ event = cpuhw->event[j];
+ if (event->hw.idx == (i + 1)) {
+ active = 1;
+ record_and_restart(event, val[i], regs);
+ break;
+ }
}
+ if (!active)
+ /* reset non active counters that have overflowed */
+ write_pmc(i + 1, 0);
}
-
- /*
- * In case we didn't find and reset the event that caused
- * the interrupt, scan all events and reset any that are
- * negative, to avoid getting continual interrupts.
- * Any that we processed in the previous loop will not be negative.
- */
- if (!found) {
- for (i = 0; i < ppmu->n_counter; ++i) {
- if (is_limited_pmc(i + 1))
+ if (!found && pvr_version_is(PVR_POWER7)) {
+ /* check active counters for special buggy p7 overflow */
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- val = read_pmc(i + 1);
- if (pmc_overflow(val))
- write_pmc(i + 1, 0);
+ if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+ /* event has overflowed in a buggy way */
+ found = 1;
+ record_and_restart(event,
+ val[event->hw.idx - 1],
+ regs);
+ }
}
}
+ if ((!found) && printk_ratelimit())
+ printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
@@ -1537,6 +1582,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
+ power_pmu.attr_groups = ppmu->attr_groups;
+
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
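For context on the interface that power_events_sysfs_show() and the new attr_groups hook expose: each named event becomes a read-only file whose contents are exactly the "event=0x..." string formatted above. The snippet below is a minimal userspace sketch, not part of the patch; the path and event name are illustrative and assume the standard /sys/bus/event_source/devices/cpu/events/ layout that perf tooling reads.

    /* Illustrative only: parse "event=0x..." from a PMU sysfs events file. */
    #include <stdio.h>

    int main(void)
    {
        /* Assumed path: the generic perf sysfs events directory. */
        const char *path =
            "/sys/bus/event_source/devices/cpu/events/branch-misses";
        unsigned long long config;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* power_events_sysfs_show() emits "event=0x%02llx\n". */
        if (fscanf(f, "event=0x%llx", &config) == 1)
            printf("raw event code: 0x%llx\n", config);
        fclose(f);
        return 0;
    }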
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index cb2e2949c8d1..fb664929f5da 100644
--- a/arch/powerpc/perf/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
@@ -24,6 +24,8 @@ static int e500_generic_events[] = {
[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
[PERF_COUNT_HW_BRANCH_MISSES] = 15,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 2ee01e38d5e2..3c475d6267c7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -51,6 +51,18 @@
#define MMCR1_PMCSEL_MSK 0xff
/*
+ * Power7 event codes.
+ */
+#define PME_PM_CYC 0x1e
+#define PME_PM_GCT_NOSLOT_CYC 0x100f8
+#define PME_PM_CMPLU_STALL 0x4000a
+#define PME_PM_INST_CMPL 0x2
+#define PME_PM_LD_REF_L1 0xc880
+#define PME_PM_LD_MISS_L1 0x400f0
+#define PME_PM_BRU_FIN 0x10068
+#define PME_PM_BRU_MPRED 0x400f6
+
+/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
}
static int power7_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */
- [PERF_COUNT_HW_INSTRUCTIONS] = 2,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/
- [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
+ [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,70 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
+
+GENERIC_EVENT_ATTR(cpu-cycles, CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED);
+
+POWER_EVENT_ATTR(CYC, CYC);
+POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
+POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
+POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
+POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
+POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
+POWER_EVENT_ATTR(BRU_FIN, BRU_FIN);
+POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED);
+
+static struct attribute *power7_events_attr[] = {
+ GENERIC_EVENT_PTR(CYC),
+ GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(CMPLU_STALL),
+ GENERIC_EVENT_PTR(INST_CMPL),
+ GENERIC_EVENT_PTR(LD_REF_L1),
+ GENERIC_EVENT_PTR(LD_MISS_L1),
+ GENERIC_EVENT_PTR(BRU_FIN),
+ GENERIC_EVENT_PTR(BRU_MPRED),
+
+ POWER_EVENT_PTR(CYC),
+ POWER_EVENT_PTR(GCT_NOSLOT_CYC),
+ POWER_EVENT_PTR(CMPLU_STALL),
+ POWER_EVENT_PTR(INST_CMPL),
+ POWER_EVENT_PTR(LD_REF_L1),
+ POWER_EVENT_PTR(LD_MISS_L1),
+ POWER_EVENT_PTR(BRU_FIN),
+ POWER_EVENT_PTR(BRU_MPRED),
+ NULL
+};
+
+
+static struct attribute_group power7_pmu_events_group = {
+ .name = "events",
+ .attrs = power7_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+struct attribute_group power7_pmu_format_group = {
+ .name = "format",
+ .attrs = power7_pmu_format_attr,
+};
+
+static const struct attribute_group *power7_pmu_attr_groups[] = {
+ &power7_pmu_format_group,
+ &power7_pmu_events_group,
+ NULL,
+};
+
static struct power_pmu power7_pmu = {
.name = "POWER7",
.n_counter = 6,
@@ -373,6 +449,7 @@ static struct power_pmu power7_pmu = {
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
.flags = PPMU_ALT_SIPR,
+ .attr_groups = power7_pmu_attr_groups,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,
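As a usage note, not part of the patch: once a PMU maps an index such as PERF_COUNT_HW_STALLED_CYCLES_BACKEND in its generic_events table (as both the e500 and POWER7 tables now do), an ordinary perf_event_open() user can count it without knowing the raw event code. A minimal sketch follows; error handling is trimmed and the measured workload is a placeholder loop.

    /* Illustrative only: count backend stall cycles for this thread. */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        struct perf_event_attr attr;
        unsigned long long count;
        volatile unsigned long sink = 0;
        unsigned long i;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* No glibc wrapper exists for perf_event_open(). */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (i = 0; i < 10000000; i++)  /* placeholder work */
            sink += i;
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("stalled-cycles-backend: %llu\n", count);
        close(fd);
        return 0;
    }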