author     Deng-Cheng Zhu <dczhu@mips.com>        2011-11-22 03:28:47 +0800
committer  Ralf Baechle <ralf@linux-mips.org>     2011-12-07 22:04:41 +0000
commit     266623b7597c97e6ff987b45719540b227751420 (patch)
tree       b0507379c5d59662e8a07178825f511acf90413c  /arch/mips/kernel/perf_event_mipsxx.c
parent     74653ccf231a3100dd03e16e7a4178868a37332e (diff)
MIPS/Perf-events: Remove pmu and event state checking in validate_event()
Why the pmu check is removed: since 3.2-rc1, by the time the arch-level event init is called, the event is already connected to its PMU. Also, validate_event() is _only_ called by validate_group() during event init, so there is no need to check or temporarily assign the event's pmu in validate_group().

Why the event state check is removed: events can be created in PERF_EVENT_STATE_OFF (attr->disabled == 1). When such events go through this check, validate_group() does only dummy work, yet we do need to do group scheduling emulation for them in event init. Again, validate_event() is _only_ called by validate_group().

Reference: http://www.spinics.net/lists/mips/msg42190.html

Signed-off-by: Deng-Cheng Zhu <dczhu@mips.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: David Daney <david.daney@cavium.com>
Cc: Eyal Barzilay <eyal@mips.com>
Cc: Zenon Fortuna <zenon@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/3108/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
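For illustration, here is the short-circuit being removed, copied from the hunk below with added annotation; both branches return 1 without touching the fake counter state, which is why validate_group() ended up doing only dummy work for disabled events and cross-PMU siblings:

	/* Pre-patch body of validate_event() */
	struct hw_perf_event fake_hwc = event->hw;

	if (event->pmu != &pmu ||			/* sibling belongs to another PMU */
	    event->state <= PERF_EVENT_STATE_OFF)	/* created with attr->disabled == 1 */
		return 1;				/* reported "valid" with no emulation done */

	/* Only enabled events of this PMU reached the real scheduling check. */
	return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;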
Diffstat (limited to 'arch/mips/kernel/perf_event_mipsxx.c')
-rw-r--r--   arch/mips/kernel/perf_event_mipsxx.c   18
1 file changed, 3 insertions(+), 15 deletions(-)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index b5d6b3fa5a41..b22cc5fd596d 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -707,18 +707,6 @@ static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 
 }
 
-static int validate_event(struct cpu_hw_events *cpuc,
-			struct perf_event *event)
-{
-	struct hw_perf_event fake_hwc = event->hw;
-
-	/* Allow mixed event group. So return 1 to pass validation. */
-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
-		return 1;
-
-	return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
-}
-
 static int validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
@@ -726,15 +714,15 @@ static int validate_group(struct perf_event *event)
 
 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
-	if (!validate_event(&fake_cpuc, leader))
+	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 		return -ENOSPC;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_cpuc, sibling))
+		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 			return -ENOSPC;
 	}
 
-	if (!validate_event(&fake_cpuc, event))
+	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 		return -ENOSPC;
 
 	return 0;
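
For readability, a sketch of validate_group() as it reads after this patch, reconstructed from the two hunks above; the struct cpu_hw_events fake_cpuc declaration is not visible in the diff context and is inferred here from the memset() call:

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;	/* assumed local, implied by the memset() below */

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	/*
	 * Every group member, disabled or not, now runs through counter
	 * allocation on the fake cpu_hw_events, so group scheduling is
	 * emulated for the whole group at event init time.
	 */
	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -ENOSPC;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -ENOSPC;

	return 0;
}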