int max_events;
};
-static struct x86_pmu *x86_pmu __read_mostly;
+static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
.enabled = 1,
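
The heart of this change: x86_pmu goes from a pointer to a static struct, so every x86_pmu.field access is a direct load instead of a pointer dereference, and the vendor ops are copied in once at init. A minimal user-space sketch of the two access styles, with a hypothetical struct ops standing in for struct x86_pmu (names are illustrative, not the kernel's):

	#include <stdio.h>

	/* Hypothetical ops table, shaped like struct x86_pmu. */
	struct ops {
		int (*event_map)(int id);
	};

	static int intel_map(int id) { return id + 100; }

	static struct ops *ops_ptr;	/* old style: one extra load per access */
	static struct ops ops;		/* new style: direct access             */

	int main(void)
	{
		static struct ops intel = { .event_map = intel_map };

		ops_ptr = &intel;	/* publish by pointer      */
		ops = intel;		/* or copy the struct once */

		printf("%d %d\n", ops_ptr->event_map(1), ops.event_map(1));
		return 0;
	}
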
disable_lapic_nmi_watchdog();
for (i = 0; i < nr_counters_generic; i++) {
- if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
+ if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
for (i = 0; i < nr_counters_generic; i++) {
- if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
+ if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
eventsel_fail:
for (i--; i >= 0; i--)
- release_evntsel_nmi(x86_pmu->eventsel + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
i = nr_counters_generic;
perfctr_fail:
for (i--; i >= 0; i--)
- release_perfctr_nmi(x86_pmu->perfctr + i);
+ release_perfctr_nmi(x86_pmu.perfctr + i);
if (nmi_watchdog == NMI_LOCAL_APIC)
enable_lapic_nmi_watchdog();
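
The reserve path above uses the two-stage goto unwind: if an eventsel reservation fails, release only the eventsels taken so far, reset i, and fall through to release every perfctr. A self-contained sketch of that control flow, with stand-in reserve/release helpers (all names hypothetical, the failure point is chosen for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR 4

	/* Stand-ins for reserve_perfctr_nmi()/release_evntsel_nmi() and friends. */
	static bool reserve_perfctr(int i) { (void)i; return true; }
	static bool reserve_evntsel(int i) { return i < 2; }	/* fail at i == 2 */
	static void release_perfctr(int i) { printf("release perfctr %d\n", i); }
	static void release_evntsel(int i) { printf("release evntsel %d\n", i); }

	static bool reserve_hardware(void)
	{
		int i;

		for (i = 0; i < NR; i++)
			if (!reserve_perfctr(i))
				goto perfctr_fail;

		for (i = 0; i < NR; i++)
			if (!reserve_evntsel(i))
				goto eventsel_fail;

		return true;

	eventsel_fail:
		/* release only the eventsels taken so far: i-1 .. 0 ... */
		for (i--; i >= 0; i--)
			release_evntsel(i);
		/* ... then fall through and release every perfctr */
		i = NR;
	perfctr_fail:
		for (i--; i >= 0; i--)
			release_perfctr(i);
		return false;
	}

	int main(void)
	{
		printf("reserved: %d\n", reserve_hardware());
		return 0;
	}
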
int i;
for (i = 0; i < nr_counters_generic; i++) {
- release_perfctr_nmi(x86_pmu->perfctr + i);
- release_evntsel_nmi(x86_pmu->eventsel + i);
+ release_perfctr_nmi(x86_pmu.perfctr + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
}
if (nmi_watchdog == NMI_LOCAL_APIC)
* Raw event type provides the config in the event structure
*/
if (perf_event_raw(hw_event)) {
- hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
+ hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
} else {
- if (perf_event_id(hw_event) >= x86_pmu->max_events)
+ if (perf_event_id(hw_event) >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
+ hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
}
counter->destroy = hw_perf_counter_destroy;
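
For reference, the raw/generic split in the hunk above: a raw event supplies its config bits verbatim through raw_event(), while a generic event id is bounds-checked and translated through the vendor's event_map(). A compact sketch of that dispatch, with illustrative types and a made-up two-entry map:

	#include <stdint.h>
	#include <stdio.h>

	enum { COUNT_CPU_CYCLES, COUNT_INSTRUCTIONS, MAX_EVENTS };

	/* Stand-ins for the x86_pmu.raw_event/.event_map methods. */
	static uint64_t raw_event(uint64_t config) { return config & 0xffffULL; }
	static uint64_t event_map(int id)
	{
		static const uint64_t map[MAX_EVENTS] = { 0x3c, 0xc0 };
		return map[id];
	}

	static int init_config(uint64_t *config, int is_raw, uint64_t ev)
	{
		if (is_raw) {
			*config |= raw_event(ev);	/* raw: bits used as given */
		} else {
			if (ev >= MAX_EVENTS)
				return -1;		/* -EINVAL in the kernel   */
			*config |= event_map((int)ev);	/* generic: vendor mapping */
		}
		return 0;
	}

	int main(void)
	{
		uint64_t cfg = 0;

		if (!init_config(&cfg, 0, COUNT_INSTRUCTIONS))
			printf("config = 0x%llx\n", (unsigned long long)cfg);
		return 0;
	}
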
if (unlikely(!perf_counters_initialized))
return 0;
- return x86_pmu->save_disable_all();
+ return x86_pmu.save_disable_all();
}
/*
* Exported because of ACPI idle
if (unlikely(!perf_counters_initialized))
return;
- x86_pmu->restore_all(ctrl);
+ x86_pmu.restore_all(ctrl);
}
/*
* Exported because of ACPI idle
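
These two hunks are a save-and-restore pair (hence the ACPI-idle export): save_disable_all() returns the previous global enable state while stopping all counters, and restore_all() writes it back. A tiny sketch of that contract, with a plain variable standing in for the global control MSR:

	#include <stdint.h>
	#include <stdio.h>

	/* Stands in for the global counter-enable MSR. */
	static uint64_t global_ctrl = 0xf;

	static uint64_t save_disable_all(void)
	{
		uint64_t old = global_ctrl;

		global_ctrl = 0;	/* stop all counters */
		return old;
	}

	static void restore_all(uint64_t ctrl)
	{
		global_ctrl = ctrl;	/* resume exactly what was running */
	}

	int main(void)
	{
		uint64_t saved = save_disable_all();

		/* ... counters are off here ... */
		restore_all(saved);
		printf("ctrl = 0x%llx\n", (unsigned long long)global_ctrl);
		return 0;
	}
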
if (unlikely(!perf_counters_initialized))
return;
- x86_pmu->enable(idx, config);
+ x86_pmu.enable(idx, config);
}
static void intel_pmu_disable_counter(int idx, u64 config)
if (unlikely(!perf_counters_initialized))
return;
- x86_pmu->disable(idx, config);
+ x86_pmu.disable(idx, config);
}
static inline void
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
return X86_PMC_IDX_FIXED_BUS_CYCLES;
return -1;
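
The function above checks whether a generic event's config matches one of the architectural fixed-function counters, in which case it can be scheduled on a fixed counter instead of a generic one. A sketch of that lookup, with illustrative index constants and event codes (not the kernel's X86_PMC_IDX_FIXED_* values):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative fixed-counter indices. */
	#define IDX_FIXED_INSTRUCTIONS	32
	#define IDX_FIXED_CPU_CYCLES	33

	static uint64_t map_instructions(void) { return 0xc0; }
	static uint64_t map_cpu_cycles(void)   { return 0x3c; }

	/* Return a fixed-counter index for the event, or -1 for generic. */
	static int fixed_mode_idx(uint64_t event)
	{
		if (event == map_instructions())
			return IDX_FIXED_INSTRUCTIONS;
		if (event == map_cpu_cycles())
			return IDX_FIXED_CPU_CYCLES;
		return -1;
	}

	int main(void)
	{
		printf("%d %d\n", fixed_mode_idx(0xc0), fixed_mode_idx(0x123));
		return 0;
	}
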
set_bit(idx, cpuc->used);
hwc->idx = idx;
}
- hwc->config_base = x86_pmu->eventsel;
- hwc->counter_base = x86_pmu->perfctr;
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->counter_base = x86_pmu.perfctr;
}
perf_counters_lapic_init(hwc->nmi);
pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
for (idx = 0; idx < nr_counters_generic; idx++) {
- rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
- rdmsrl(x86_pmu->perfctr + idx, pmc_count);
+ rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+ rdmsrl(x86_pmu.perfctr + idx, pmc_count);
prev_left = per_cpu(prev_left[idx], cpu);
irq_enter();
apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
ack_APIC_irq();
- x86_pmu->handle_irq(regs, 0);
+ x86_pmu.handle_irq(regs, 0);
irq_exit();
}
regs = args->regs;
apic_write(APIC_LVTPC, APIC_DM_NMI);
- ret = x86_pmu->handle_irq(regs, 1);
+ ret = x86_pmu.handle_irq(regs, 1);
return ret ? NOTIFY_STOP : NOTIFY_OK;
}
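
Both entry points above funnel into the same handle_irq method: the plain interrupt path calls it with nmi == 0, while the NMI notifier passes nmi == 1 and turns the return value into NOTIFY_STOP/NOTIFY_OK. A sketch of that shared-handler shape (regs stubbed out, notifier constants illustrative):

	#include <stdio.h>

	#define NOTIFY_OK	0x0001	/* illustrative values */
	#define NOTIFY_STOP	0x8000

	/* Shared handler; nmi tells it which context it runs in. */
	static int handle_irq(void *regs, int nmi)
	{
		(void)regs;
		printf("overflow handled, nmi=%d\n", nmi);
		return 1;		/* at least one counter serviced */
	}

	static void perf_counter_interrupt(void *regs)
	{
		/* irq path: APIC already acked, run the shared handler */
		handle_irq(regs, 0);
	}

	static int perf_counter_nmi_handler(void *regs)
	{
		/* nmi path: claim the NMI only if we actually handled it */
		return handle_irq(regs, 1) ? NOTIFY_STOP : NOTIFY_OK;
	}

	int main(void)
	{
		perf_counter_interrupt(NULL);
		return perf_counter_nmi_handler(NULL) == NOTIFY_STOP ? 0 : 1;
	}
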
pr_info("... bit width: %d\n", eax.split.bit_width);
pr_info("... mask length: %d\n", eax.split.mask_length);
- x86_pmu = &intel_pmu;
+ x86_pmu = intel_pmu;
nr_counters_generic = eax.split.num_counters;
nr_counters_fixed = edx.split.num_counters_fixed;
static int amd_pmu_init(void)
{
- x86_pmu = &amd_pmu;
+ x86_pmu = amd_pmu;
nr_counters_generic = 4;
nr_counters_fixed = 0;
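
With x86_pmu now a plain struct, the vendor init routines simply copy the whole template (intel_pmu or amd_pmu) into the static instance, one struct assignment at boot. A runnable sketch of that selection-by-copy, with a hypothetical slice of the ops struct:

	#include <stdio.h>

	/* Hypothetical slice of struct x86_pmu. */
	struct pmu {
		const char	*name;
		int		nr_counters;
		unsigned long	(*event_map)(int id);
	};

	static unsigned long intel_map(int id) { return 0x100 + id; }
	static unsigned long amd_map(int id)   { return 0x200 + id; }

	/* Intel reads its counter count from CPUID; AMD hard-codes 4. */
	static const struct pmu intel_pmu = { "intel", 2, intel_map };
	static const struct pmu amd_pmu   = { "amd",   4, amd_map   };

	static struct pmu pmu;	/* the single static instance */

	static void pmu_init(int is_amd)
	{
		/* one whole-struct copy at boot; all later access is direct */
		pmu = is_amd ? amd_pmu : intel_pmu;
	}

	int main(void)
	{
		pmu_init(1);
		printf("%s: map(1) = 0x%lx\n", pmu.name, pmu.event_map(1));
		return 0;
	}
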