author	Paul Mackerras <paulus@samba.org>	2009-04-08 20:30:18 +1000
committer	Ingo Molnar <mingo@elte.hu>	2009-04-08 12:39:28 +0200
commit	f708223d49ac39f5af1643985056206c98033f5b (patch)
tree	67e317f49e49845183dd5e74f01a0ac14e5088b2 /arch/powerpc/kernel/perf_counter.c
parent	dc66270b51a62b1a6888d5309229e638a305c47b (diff)
perf_counter: powerpc: set sample enable bit for marked instruction events
Impact: enable access to hardware feature

POWER processors have the ability to "mark" a subset of the instructions and
provide more detailed information on what happens to the marked instructions
as they flow through the pipeline.  This marking is enabled by the "sample
enable" bit in MMCRA, and there are synchronization requirements around
setting and clearing the bit.

This adds logic to the processor-specific back-ends so that they know which
events relate to marked instructions and set the sampling enable bit if any
event that we want to put on the PMU is a marked instruction event.  It also
adds logic to the generic powerpc code to do the necessary synchronization if
that bit is set.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <18908.31930.1024.228867@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
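To make the back-end side of that description concrete, here is a minimal,
self-contained sketch (not code from this patch: the MMCRA_SAMPLE_ENABLE bit
value, the event encoding and event_is_marked_instruction() are invented
placeholders) of how a processor-specific back-end can flag the MMCRA image
when any event being scheduled is a marked-instruction event:

	#include <stdio.h>

	#define MMCRA_SAMPLE_ENABLE	0x00000001UL	/* placeholder bit value */

	/* Hypothetical predicate: a real back-end would consult its tables
	 * of marked-instruction event codes. */
	static int event_is_marked_instruction(unsigned int event)
	{
		return (event & 0x00f00000) != 0;	/* made-up encoding */
	}

	/* While computing the MMCR images for a set of events, request
	 * instruction sampling in the MMCRA image (mmcr[2]) if any event
	 * being put on the PMU is a marked-instruction event. */
	static void sketch_request_sampling(const unsigned int event[], int n_ev,
					    unsigned long mmcr[3])
	{
		int i;

		for (i = 0; i < n_ev; ++i) {
			if (event_is_marked_instruction(event[i])) {
				mmcr[2] |= MMCRA_SAMPLE_ENABLE;
				break;
			}
		}
	}

	int main(void)
	{
		unsigned int events[2] = { 0x00001001, 0x00f01002 };	/* made-up codes */
		unsigned long mmcr[3] = { 0, 0, 0 };

		sketch_request_sampling(events, 2, mmcr);
		printf("MMCRA image: %#lx\n", mmcr[2]);
		return 0;
	}

With the bit recorded in the MMCRA image, the generic code in the diff below
only has to test cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE to decide whether the
extra synchronization is needed.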
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	28
1 file changed, 23 insertions, 5 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 0e5651385ddc..0697ade84dd3 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -307,6 +307,15 @@ u64 hw_perf_save_disable(void)
 		}
 
 		/*
+		 * Disable instruction sampling if it was enabled
+		 */
+		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+			mtspr(SPRN_MMCRA,
+			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+			mb();
+		}
+
+		/*
 		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
 		 * executed and the PMU has frozen the counters
@@ -347,12 +356,11 @@ void hw_perf_restore(u64 disable)
 	 * (possibly updated for removal of counters).
 	 */
 	if (!cpuhw->n_added) {
-		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 		if (cpuhw->n_counters == 0)
 			get_lppaca()->pmcregs_in_use = 0;
-		goto out;
+		goto out_enable;
 	}
 
 	/*
@@ -385,7 +393,7 @@ void hw_perf_restore(u64 disable)
 	 * Then unfreeze the counters.
 	 */
 	get_lppaca()->pmcregs_in_use = 1;
-	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 				| MMCR0_FC);
@@ -421,10 +429,20 @@ void hw_perf_restore(u64 disable)
 		write_pmc(counter->hw.idx, val);
 		perf_counter_update_userpage(counter);
 	}
-	mb();
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
+
+ out_enable:
+	mb();
 	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
+	/*
+	 * Enable instruction sampling if necessary
+	 */
+	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+		mb();
+		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+	}
+
  out:
 	local_irq_restore(flags);
 }
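Read together, the hunks above impose a specific ordering: on the disable
side sampling is switched off and synchronized before the counters are
frozen, and on the restore side the PMU is brought back up with sampling
still off, MMCRA_SAMPLE_ENABLE being written only after MMCR0 has unfrozen
the counters. The sketch below is a hedged paraphrase of those two paths,
not the kernel code itself: write accesses to a plain struct and
sync_barrier() stand in for mtspr() and mb(), and the bit values are
placeholders.

	#define MMCRA_SAMPLE_ENABLE	0x00000001UL	/* placeholder */
	#define MMCR0_FC		0x80000000UL	/* freeze counters, placeholder */

	struct pmu_regs {
		unsigned long mmcr0, mmcr1, mmcra;
	};

	static void sync_barrier(void)
	{
		/* models the mb() calls in the patch */
	}

	/* Freeze path (cf. hw_perf_save_disable): clear the sample-enable
	 * bit and synchronize before freezing the counters. */
	static void sketch_freeze(struct pmu_regs *regs, const unsigned long mmcr[3])
	{
		if (mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			regs->mmcra = mmcr[2] & ~MMCRA_SAMPLE_ENABLE;
			sync_barrier();
		}
		regs->mmcr0 |= MMCR0_FC;	/* freeze */
		sync_barrier();
	}

	/* Unfreeze path (cf. the out_enable path in hw_perf_restore): bring
	 * the PMU up with sampling still off, and enable sampling only after
	 * the counters are running. */
	static void sketch_unfreeze(struct pmu_regs *regs, const unsigned long mmcr[3])
	{
		regs->mmcra = mmcr[2] & ~MMCRA_SAMPLE_ENABLE;
		regs->mmcr1 = mmcr[1];
		sync_barrier();
		regs->mmcr0 = mmcr[0];		/* unfreeze */
		if (mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			sync_barrier();
			regs->mmcra = mmcr[2];	/* sampling on last */
		}
	}

	int main(void)
	{
		struct pmu_regs regs = { 0, 0, 0 };
		unsigned long mmcr[3] = { 0x1234, 0x5678, MMCRA_SAMPLE_ENABLE };

		sketch_freeze(&regs, mmcr);
		sketch_unfreeze(&regs, mmcr);
		return 0;
	}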