From: Mark Rutland
Date: Wed, 27 Apr 2011 10:20:11 +0000 (+0100)
Subject: ARM: perf: move active_events into struct arm_pmu
X-Git-Tag: MMI-PSA29.97-13-9~18195^2~7^2^2~11
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=03b7898d300de62078cc130fbc83b84b1d1e0f8d;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

ARM: perf: move active_events into struct arm_pmu

This patch moves the active_events counter into struct arm_pmu, in
preparation for supporting multiple PMUs. This also moves
pmu_reserve_mutex, as it is used to guard accesses to active_events.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Signed-off-by: Will Deacon
---

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 438482ff7498..9874395e7e7a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -82,6 +82,8 @@ struct arm_pmu {
 	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
 	u32		raw_event_mask;
 	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
 	u64		max_period;
 };
 
@@ -454,15 +456,15 @@ armpmu_reserve_hardware(void)
 	return 0;
 }
 
-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
 static void
 hw_perf_event_destroy(struct perf_event *event)
 {
-	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
+	atomic_t *active_events = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
 		armpmu_release_hardware();
-		mutex_unlock(&pmu_reserve_mutex);
+		mutex_unlock(pmu_reserve_mutex);
 	}
 }
 
@@ -543,6 +545,7 @@ __hw_perf_event_init(struct perf_event *event)
 static int armpmu_event_init(struct perf_event *event)
 {
 	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
 
 	switch (event->attr.type) {
 	case PERF_TYPE_RAW:
@@ -556,15 +559,14 @@ static int armpmu_event_init(struct perf_event *event)
 
 	event->destroy = hw_perf_event_destroy;
 
-	if (!atomic_inc_not_zero(&active_events)) {
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0) {
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
 			err = armpmu_reserve_hardware();
-		}
 
 		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
 	}
 
 	if (err)
@@ -613,6 +615,12 @@ static struct pmu pmu = {
 	.read		= armpmu_read,
 };
 
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+}
+
 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"
 #include "perf_event_v6.c"
@@ -718,6 +726,7 @@ init_hw_perf_events(void)
 	if (armpmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			armpmu->name, armpmu->num_events);
+		armpmu_init(armpmu);
 		perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
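
For readers coming to this code fresh, the scheme being relocated works as
follows: the first event to be initialised reserves the PMU hardware,
subsequent events only bump a reference count, and the last event to be
destroyed releases the hardware; active_events is that reference count and
reserve_mutex serialises the 0 <-> 1 transitions. Below is a minimal,
self-contained userspace sketch of the same pattern (not kernel code:
fake_pmu, event_init() and event_destroy() are made-up stand-ins, and the
teardown path simply takes the mutex unconditionally rather than reproducing
the kernel's atomic_dec_and_mutex_lock() fast path).

/*
 * Userspace illustration only -- not part of the patch.  Build with
 * "gcc -pthread"; all names below are invented for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_pmu {
	atomic_int active_events;        /* events currently using the PMU */
	pthread_mutex_t reserve_mutex;   /* guards the 0 <-> 1 transitions */
};

static struct fake_pmu pmu = {
	.active_events = 0,
	.reserve_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static void reserve_hardware(void) { puts("reserve PMU hardware"); }
static void release_hardware(void) { puts("release PMU hardware"); }

/* Roughly what armpmu_event_init() does with armpmu->active_events. */
static void event_init(void)
{
	int old = atomic_load(&pmu.active_events);

	/* Fast path: if the PMU is already reserved, just bump the count. */
	while (old != 0 &&
	       !atomic_compare_exchange_weak(&pmu.active_events, &old, old + 1))
		;
	if (old != 0)
		return;

	/* Slow path: the first user reserves the hardware under the mutex. */
	pthread_mutex_lock(&pmu.reserve_mutex);
	if (atomic_load(&pmu.active_events) == 0)
		reserve_hardware();
	atomic_fetch_add(&pmu.active_events, 1);
	pthread_mutex_unlock(&pmu.reserve_mutex);
}

/* Roughly what hw_perf_event_destroy() does: release on the last put. */
static void event_destroy(void)
{
	pthread_mutex_lock(&pmu.reserve_mutex);
	if (atomic_fetch_sub(&pmu.active_events, 1) == 1)
		release_hardware();
	pthread_mutex_unlock(&pmu.reserve_mutex);
}

int main(void)
{
	event_init();    /* first event: reserves the hardware  */
	event_init();    /* second event: fast path, count only */
	event_destroy();
	event_destroy(); /* last event: releases the hardware   */
	return 0;
}

The point of the patch itself is only that this state now lives inside
struct arm_pmu instead of in file-scope statics, so each PMU instance can
carry its own count and mutex once multiple PMUs are supported.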