*/
#define ARMPMU_MAX_HWEVENTS 32
-/* The events for a given CPU. */
-struct cpu_hw_events {
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
/*
- * The events that are active on the CPU for the given index.
+ * The events that are active on the PMU for the given index.
*/
struct perf_event **events;
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
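/*
 * Illustrative aside, not part of the patch: pmu_hw_events carries
 * pointers rather than baking in per-CPU storage, so whoever owns the
 * PMU supplies the backing arrays. For the CPU PMU, cpu_pmu_init()
 * below wires each per-CPU instance to the per-CPU arrays above:
 *
 *	struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
 *	events->events	  = per_cpu(hw_events, cpu);
 *	events->used_mask = per_cpu(used_mask, cpu);
 *
 * A PMU with a single, system-wide register set could point these at
 * ordinary static arrays instead.
 */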
struct arm_pmu {
struct pmu pmu;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
- int (*get_event_idx)(struct cpu_hw_events *cpuc,
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct hw_perf_event *hwc);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
- struct cpu_hw_events *(*get_hw_events)(void);
+ struct pmu_hw_events *(*get_hw_events)(void);
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
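/*
 * For reference: to_arm_pmu() is plain container_of(), expanding to
 * roughly
 *
 *	(struct arm_pmu *)((char *)(p) - offsetof(struct arm_pmu, pmu))
 *
 * Recovering the arm_pmu from the struct pmu embedded in it means the
 * generic helpers below can work from event->pmu and need not reach
 * for the cpu_pmu global.
 */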
/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
int id = -ENODEV;
- if (armpmu != NULL)
- id = armpmu->id;
+ if (cpu_pmu != NULL)
+ id = cpu_pmu->id;
return id;
}
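/*
 * Accessors like this one exist for code outside perf (historically
 * the OProfile perf backend) that needs to identify the CPU's PMU;
 * everything else should go through the struct pmu callbacks rather
 * than the cpu_pmu pointer.
 */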
int
armpmu_get_max_events(void)
{
int max_events = 0;
- if (armpmu != NULL)
- max_events = armpmu->num_events;
+ if (cpu_pmu != NULL)
+ max_events = cpu_pmu->num_events;
return max_events;
}
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0);
armpmu_stop(event, PERF_EF_UPDATE);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
perf_event_update_userpage(event);
}
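/*
 * Note the teardown order above: the event is stopped (and its final
 * count saved) before events[idx] is cleared, and the counter bit is
 * only returned to used_mask once no interrupt handler can still
 * observe the slot.
 */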
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
- idx = armpmu->get_event_idx(cpuc, hwc);
+ idx = armpmu->get_event_idx(hw_events, hwc);
if (idx < 0) {
err = idx;
goto out;
*/
event->hw.idx = idx;
armpmu->disable(hwc, idx);
- cpuc->events[idx] = event;
+ hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
}
static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
- return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
- struct cpu_hw_events fake_pmu;
+ struct pmu_hw_events fake_pmu;
+ DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
- memset(&fake_pmu, 0, sizeof(fake_pmu));
+ /* used_mask is now a pointer; give the scratch PMU real bitmap storage. */
+ memset(fake_used_mask, 0, sizeof(fake_used_mask));
+ fake_pmu.used_mask = fake_used_mask;
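/*
 * Sketch of the elided remainder of validate_group(), for reference
 * (illustrative, not the patch body): every member of the group is
 * offered to the scratch PMU in turn, and the group is schedulable
 * only if get_event_idx() finds a free counter for each.
 *
 *	if (!validate_event(&fake_pmu, leader))
 *		return -ENOSPC;
 *
 *	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 *		if (!validate_event(&fake_pmu, sibling))
 *			return -ENOSPC;
 *	}
 *
 *	return validate_event(&fake_pmu, event) ? 0 : -ENOSPC;
 */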
static void armpmu_enable(struct pmu *pmu)
{
- struct arm_pmu *armpmu = to_arm_pmu(pmu);
/* Enable all of the perf events on hardware. */
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
int idx, enabled = 0;
- struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
for (idx = 0; idx < armpmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
+ struct perf_event *event = hw_events->events[idx];
if (!event)
continue;
* This requires SMP to be available, so exists as a separate initcall.
*/
static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
{
- if (armpmu && armpmu->reset)
- return on_each_cpu(armpmu->reset, NULL, 1);
+ if (cpu_pmu && cpu_pmu->reset)
+ return on_each_cpu(cpu_pmu->reset, NULL, 1);
return 0;
}
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
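/*
 * on_each_cpu() runs the reset callback on every online CPU and, with
 * a non-zero last argument, waits for all of them to finish; that is
 * why the reset lives in its own initcall after SMP bring-up instead
 * of running from the probe path.
 */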
/*
* PMU platform driver and devicetree bindings.
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
- armpmu->plat_device = pdev;
+ cpu_pmu->plat_device = pdev;
return 0;
}
}
device_initcall(register_pmu_driver);
-static struct cpu_hw_events *armpmu_get_cpu_events(void)
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
return &__get_cpu_var(cpu_hw_events);
}
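/*
 * Hypothetical counterpart for a non-CPU PMU (illustrative only; all
 * of the sys_* names below are made up): a device with one
 * system-wide register set can satisfy get_hw_events() from static
 * storage, with no per-CPU state at all.
 */
static struct perf_event *sys_events[ARMPMU_MAX_HWEVENTS];
static DECLARE_BITMAP(sys_used_mask, ARMPMU_MAX_HWEVENTS);
static struct pmu_hw_events sys_hw_events = {
	.events		= sys_events,
	.used_mask	= sys_used_mask,
	.pmu_lock	= __RAW_SPIN_LOCK_UNLOCKED(sys_hw_events.pmu_lock),
};

static struct pmu_hw_events *sys_pmu_get_hw_events(void)
{
	return &sys_hw_events;
}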
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
int cpu;
for_each_possible_cpu(cpu) {
- struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
raw_spin_lock_init(&events->pmu_lock);
case 0xB360: /* ARM1136 */
case 0xB560: /* ARM1156 */
case 0xB760: /* ARM1176 */
- armpmu = armv6pmu_init();
+ cpu_pmu = armv6pmu_init();
break;
case 0xB020: /* ARM11mpcore */
- armpmu = armv6mpcore_pmu_init();
+ cpu_pmu = armv6mpcore_pmu_init();
break;
case 0xC080: /* Cortex-A8 */
- armpmu = armv7_a8_pmu_init();
+ cpu_pmu = armv7_a8_pmu_init();
break;
case 0xC090: /* Cortex-A9 */
- armpmu = armv7_a9_pmu_init();
+ cpu_pmu = armv7_a9_pmu_init();
break;
case 0xC050: /* Cortex-A5 */
- armpmu = armv7_a5_pmu_init();
+ cpu_pmu = armv7_a5_pmu_init();
break;
case 0xC0F0: /* Cortex-A15 */
- armpmu = armv7_a15_pmu_init();
+ cpu_pmu = armv7_a15_pmu_init();
break;
}
/* Intel CPUs [xscale]. */
part_number = (cpuid >> 13) & 0x7;
switch (part_number) {
case 1:
- armpmu = xscale1pmu_init();
+ cpu_pmu = xscale1pmu_init();
break;
case 2:
- armpmu = xscale2pmu_init();
+ cpu_pmu = xscale2pmu_init();
break;
}
}
- if (armpmu) {
+ if (cpu_pmu) {
pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
- cpu_pmu_init(armpmu);
- armpmu_register(armpmu, "cpu", PERF_TYPE_RAW);
+ cpu_pmu->name, cpu_pmu->num_events);
+ cpu_pmu_init(cpu_pmu);
+ armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
}
int idx)
{
unsigned long val, mask, evt, flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
static irqreturn_t armv6pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
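/*
 * perf_event_overflow() returns non-zero when the core decides the
 * event must be throttled; the handler then silences that counter via
 * the PMU's own disable() callback. The same pattern repeats in the
 * ARMv7 and XScale handlers below.
 */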
/*
armv6pmu_start(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
armv6pmu_stop(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
}
static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
/* Always place a cycle counter into the cycle counter. */
int idx)
{
unsigned long val, mask, evt, flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
int idx)
{
unsigned long val, mask, flags, evt = 0;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
*/
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0 1
-#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
#define ARMV7_MAX_COUNTERS 32
#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
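/*
 * Worked example: with cpu_pmu->num_events == 5 (a cycle counter plus
 * four event counters, as on Cortex-A8), ARMV7_IDX_COUNTER_LAST is
 * 0 + 5 - 1 = 4, so the event counters occupy indices 1..4. Masking,
 * as in
 *
 *	(idx - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK
 *
 * maps a perf index back onto a hardware counter number, since the
 * hardware provides at most 32 counters.
 */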
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/*
* Enable counter and interrupt, and set the counter to count
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/*
* Disable counter and interrupt
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
u32 pmnc;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
/*
static void armv7pmu_start(void)
{
unsigned long flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
static void armv7pmu_stop(void)
{
unsigned long flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
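/*
 * The raw_spin_lock_irqsave() pairs above serialise read-modify-write
 * of the control register. With pmu_lock kept in pmu_hw_events, the
 * lock travels with the register set it protects: per-CPU for the CPU
 * PMU, or whatever a future PMU's get_hw_events() hands back.
 */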
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
* For anything other than a cycle counter, try and use
* the events counters
*/
- for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+ for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
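/*
 * test_and_set_bit() atomically claims a free counter and reports
 * whether it was already taken, so the loop above is a race-free
 * first-fit allocator over used_mask; armpmu_del() returns slots with
 * clear_bit().
 */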
static void armv7pmu_reset(void *info)
{
- u32 idx, nb_cnt = armpmu->num_events;
+ u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
static irqreturn_t xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
irq_work_run();
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
}
static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
if (XSCALE_PERFCTR_CCNT == event->config_base) {
xscale1pmu_start(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
xscale1pmu_stop(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
static irqreturn_t xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
irq_work_run();
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
}
static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
xscale2pmu_start(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
xscale2pmu_stop(void)
{
unsigned long flags, val;
- struct cpu_hw_events *events = armpmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();