summaryrefslogtreecommitdiff
path: root/drivers/perf
diff options
context:
space:
mode:
authorJoakim Zhang <qiangqing.zhang@nxp.com>2019-12-27 19:11:54 +0800
committerJoakim Zhang <qiangqing.zhang@nxp.com>2020-01-07 10:32:55 +0800
commit05d68c514f824747fe2b04ac92b4567c1d1ee347 (patch)
treecd8d3fa1a176834496081a3779a5f8f761d521cb /drivers/perf
parenta6950601f30dbe94bc88d8f0f0841e6fa9d1532a (diff)
MLK-23195-2 perf/imx_ddr: add stop counter support for i.MX8MP
DDR perf driver now only supports free running counter, add stop counter support which is compatible with free running counter, since i.MX8MP is not free running. Add spinlock for counter value update and clear. Reviewed-by: Fugang Duan <fugang.duan@nxp.com> Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
Diffstat (limited to 'drivers/perf')
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c69
1 files changed, 52 insertions, 17 deletions
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 55083c67b2bb..abd003592ff7 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
+#include <linux/spinlock.h>
#include <linux/slab.h>
#define COUNTER_CNTL 0x0
@@ -82,6 +83,7 @@ struct ddr_pmu {
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
+ spinlock_t lock;
};
enum ddr_perf_filter_capabilities {
@@ -368,16 +370,20 @@ static void ddr_perf_event_update(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
int counter = hwc->idx;
+ unsigned long flags;
- do {
- prev_raw_count = local64_read(&hwc->prev_count);
- new_raw_count = ddr_perf_read_counter(pmu, counter);
- } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count);
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = ddr_perf_read_counter(pmu, counter);
delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
local64_add(delta, &event->count);
+ local64_set(&hwc->prev_count, new_raw_count);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
}
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
@@ -402,6 +408,15 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
}
}
+static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
+{
+ int val;
+
+ val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);
+
+ return val & CNTL_OVER ? true : false;
+}
+
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -532,9 +547,9 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
- int i;
+ int i, ret;
struct ddr_pmu *pmu = (struct ddr_pmu *) p;
- struct perf_event *event, *cycle_event = NULL;
+ struct perf_event *event;
/* all counter will stop if cycle counter disabled */
ddr_perf_counter_enable(pmu,
@@ -544,12 +559,7 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
/*
* When the cycle counter overflows, all counters are stopped,
* and an IRQ is raised. If any other counter overflows, it
- * continues counting, and no IRQ is raised.
- *
- * Cycles occur at least 4 times as often as other events, so we
- * can update all events on a cycle counter overflow and not
- * lose events.
- *
+ * will stop and no IRQ is raised.
*/
for (i = 0; i < NUM_COUNTERS; i++) {
@@ -559,17 +569,41 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
event = pmu->events[i];
ddr_perf_event_update(event);
+ }
+
+ spin_lock(&pmu->lock);
- if (event->hw.idx == EVENT_CYCLES_COUNTER)
- cycle_event = event;
+ for (i = 0; i < NUM_COUNTERS; i++) {
+ if (!pmu->events[i])
+ continue;
+
+ if (i == EVENT_CYCLES_COUNTER)
+ continue;
+
+ event = pmu->events[i];
+
+ /* check non-cycle counters overflow */
+ ret = ddr_perf_counter_overflow(pmu, event->hw.idx);
+ if (ret)
+ dev_warn(pmu->dev, "Counter%d (not cycle counter) overflow happened, data incorrect!\n", i);
+
+ /* clear non-cycle counters */
+ ddr_perf_counter_enable(pmu, event->attr.config, event->hw.idx, true);
+
+ /* update the prev_counter */
+ local64_set(&event->hw.prev_count, 0);
}
+ if (pmu->events[EVENT_CYCLES_ID])
+ local64_set(&pmu->events[EVENT_CYCLES_ID]->hw.prev_count, 0);
+
+ /* enable cycle counter to start all counters */
ddr_perf_counter_enable(pmu,
EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER,
true);
- if (cycle_event)
- ddr_perf_event_update(cycle_event);
+
+ spin_unlock(&pmu->lock);
return IRQ_HANDLED;
}
@@ -617,6 +651,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
num = ddr_perf_init(pmu, base, &pdev->dev);
platform_set_drvdata(pdev, pmu);
+ spin_lock_init(&pmu->lock);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
num);