author    Will Deacon <will.deacon@arm.com>           2012-03-06 17:34:50 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2012-03-07 09:40:49 +0000
commit    f6f5a30c834135c9f2fa10400c59ebbdd9188567 (patch)
tree      208358216772eedab5998070878db55df5d4e772 /arch/arm
parent    99c1745b9c76910e195889044f914b4898b7c9a5 (diff)
download  linux-3.10-f6f5a30c834135c9f2fa10400c59ebbdd9188567.tar.gz
          linux-3.10-f6f5a30c834135c9f2fa10400c59ebbdd9188567.tar.bz2
          linux-3.10-f6f5a30c834135c9f2fa10400c59ebbdd9188567.zip
ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
The PMU IRQ handlers in perf assume that if a counter has overflowed
then perf must be responsible. In the paranoid world of crazy hardware,
this could be false, so check that we do have a valid event before
attempting to dereference NULL in the interrupt path.

Cc: <stable@vger.kernel.org>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
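In each handler the guard is the same: skip a counter slot that has no
perf_event installed before dereferencing it, then apply the existing
overflow check. A minimal sketch of the pattern (simplified;
num_counters and counter_has_overflowed() are illustrative stand-ins
rather than the kernel's names; the real handlers use their own per-PMU
overflow checks, as the diff below shows):

	for (idx = 0; idx < num_counters; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/*
		 * Ignore if we don't have an event: the counter may have
		 * overflowed even though perf never programmed it.
		 */
		if (!event)
			continue;

		/* Only process counters that have actually overflowed. */
		if (!counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		/*
		 * ... read the counter, update the event count and run
		 * the overflow handler ...
		 */
	}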
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      | 20
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      |  4
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |  6
3 files changed, 12 insertions(+), 18 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 88bf15253fb..b78af0cc6ef 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 050cc8bf724..4d7095af2ab 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -960,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 831e019b017..a5bbd360cc4 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,6 +255,9 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -592,6 +595,9 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;