@@ ... @@ static void armpmu_stop(struct perf_event *event, int flags)
 {
         struct hw_perf_event *hwc = &event->hw;
 
-        if (!armpmu)
-                return;
-
         /*
          * ARM pmu always has to update the counter, so ignore
          * PERF_EF_UPDATE, see comments in armpmu_start().
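The hunk ends inside the comment; for reference, the code it introduces does the
update unconditionally, along these lines (a sketch of the era's
arch/arm/kernel/perf_event.c, not part of this patch; armpmu_event_update() and
the PERF_HES_* bookkeeping are assumed from the surrounding file):

         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                /* Stop the counter, then fold the hardware count into the event. */
                armpmu->disable(hwc, hwc->idx);
                armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }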
@@ ... @@ static void armpmu_start(struct perf_event *event, int flags)
 {
         struct hw_perf_event *hwc = &event->hw;
 
-        if (!armpmu)
-                return;
-
         /*
          * ARM pmu always has to reprogram the period, so ignore
          * PERF_EF_RELOAD, see the comment below.
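Again the hunk ends mid-comment; the body it introduces reprograms the period
and re-enables the counter regardless of PERF_EF_RELOAD, roughly as below (a
sketch; armpmu_event_set_period() is an assumed helper name from the same file):

         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again: counters that cannot be stopped may have
         * overflowed while "stopped", so the period must be rewritten.
         */
        armpmu_event_set_period(event, hwc, hwc->idx);
        armpmu->enable(hwc, hwc->idx);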
@@ ... @@ static int armpmu_event_init(struct perf_event *event)
                 return -ENOENT;
         }
 
-        if (!armpmu)
-                return -ENODEV;
-
         event->destroy = hw_perf_event_destroy;
 
         if (!atomic_inc_not_zero(&active_events)) {
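The -ENODEV check is now dead code: with the final hunk below, the pmu is only
registered when armpmu is non-NULL, so event_init() cannot be reached otherwise.
For context, the atomic_inc_not_zero() branch guards first-user hardware
reservation, roughly as follows (a sketch; pmu_reserve_mutex and
armpmu_reserve_hardware() are assumed names):

                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0)
                        err = armpmu_reserve_hardware();
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);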
@@ ... @@ static void armpmu_enable(struct pmu *pmu)
         int idx, enabled = 0;
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (!armpmu)
-                return;
-
         for (idx = 0; idx < armpmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
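After the removal, the whole function plausibly reads as below (a sketch; the
loop body past the visible context is assumed): it re-enables every scheduled
event and starts the PMU only if at least one counter is live.

static void armpmu_enable(struct pmu *pmu)
{
        int idx, enabled = 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];

                if (!event)
                        continue;

                /* Re-enable the counter backing this event. */
                armpmu->enable(&event->hw, idx);
                enabled = 1;
        }

        /* Only start the PMU if something is actually counting. */
        if (enabled)
                armpmu->start();
}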
@@ ... @@
 static void armpmu_disable(struct pmu *pmu)
 {
-        if (armpmu)
-                armpmu->stop();
+        armpmu->stop();
 }
 
 static struct pmu pmu = {
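These callbacks are only reachable through the struct pmu below once
perf_pmu_register() has run, which is what makes the NULL checks removable. The
initializer is along these lines (a sketch; armpmu_add, armpmu_del and
armpmu_read are assumed to exist in the same file):

static struct pmu pmu = {
        .pmu_enable     = armpmu_enable,
        .pmu_disable    = armpmu_disable,
        .event_init     = armpmu_event_init,
        .add            = armpmu_add,
        .del            = armpmu_del,
        .start          = armpmu_start,
        .stop           = armpmu_stop,
        .read           = armpmu_read,
};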
@@ ... @@ static int __init init_hw_perf_events(void)
         if (armpmu) {
                 pr_info("enabled with %s PMU driver, %d counters available\n",
                         armpmu->name, armpmu->num_events);
+                perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
         } else {
                 pr_info("no hardware support available\n");
         }
 
-        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
         return 0;
 }
 early_initcall(init_hw_perf_events);
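Net effect of the last hunk: perf_pmu_register() moves inside the if (armpmu)
branch, so a pmu is registered only when probing actually found hardware, and
every armpmu dereference in the callbacks above is then guaranteed non-NULL.
The resulting tail of the function (probing elided):

static int __init
init_hw_perf_events(void)
{
        /* ... CPUID-based probing that sets armpmu, elided ... */

        if (armpmu) {
                pr_info("enabled with %s PMU driver, %d counters available\n",
                        armpmu->name, armpmu->num_events);
                perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        } else {
                pr_info("no hardware support available\n");
        }

        return 0;
}
early_initcall(init_hw_perf_events);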