#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
+#include <asm/machdep.h>
struct cpu_hw_counters {
int n_counters;
int n_percpu;
int disabled;
int n_added;
struct perf_counter *counter[MAX_HWCOUNTERS];
unsigned int events[MAX_HWCOUNTERS];
u64 mmcr[3];
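+ /* set once ppc_md.enable_pmcs() has been called on this cpu */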
+ u8 pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
cpuhw->disabled = 1;
cpuhw->n_added = 0;
+ /*
+ * Check if we ever enabled the PMU on this cpu.
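+ * If not, ask the platform code to enable them; on pSeries this
+ * results in an H_PERFMON hypercall to let the partition use the
+ * performance monitor.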
+ */
+ if (!cpuhw->pmcs_enabled) {
+ if (ppc_md.enable_pmcs)
+ ppc_md.enable_pmcs();
+ cpuhw->pmcs_enabled = 1;
+ }
+
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the counters
* before we return.
*/
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
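+ /*
+ * No counters are in use, so tell the hypervisor (via the
+ * lppaca) that it no longer needs to preserve the PMU
+ * registers for this partition.
+ */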
+ if (cpuhw->n_counters == 0)
+ get_lppaca()->pmcregs_in_use = 0;
goto out;
}
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
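+ /* mark the PMU registers as in use so the hypervisor preserves them */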
+ get_lppaca()->pmcregs_in_use = 1;
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
}
}
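+
+/*
+ * (Re)initialise a cpu's PMU state: everything cleared, including
+ * pmcs_enabled, and the counters left frozen via MMCR0_FC.
+ */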
+void hw_perf_counter_setup(int cpu)
+{
+ struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+ memset(cpuhw, 0, sizeof(*cpuhw));
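+ /* keep all counters frozen until hw_perf_restore() programs them */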
+ cpuhw->mmcr[0] = MMCR0_FC;
+}
+
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power6_pmu;
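/*
* Weak default stubs; architectures with hardware counter support,
* such as the powerpc code above, override these.
*/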
u64 __weak hw_perf_save_disable(void) { return 0; }
void __weak hw_perf_restore(u64 ctrl) { barrier(); }
-void __weak hw_perf_counter_setup(void) { barrier(); }
+void __weak hw_perf_counter_setup(int cpu) { barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_counter_context *ctx, int cpu)
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
mutex_unlock(&perf_resource_mutex);
- hw_perf_counter_setup();
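+ /* pass the cpu number so the arch code can find its per-cpu state */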
+ hw_perf_counter_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU