*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
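
The new include is needed because the rest of the patch adds a cpumask to each PMU instance and tests it with the cpumask_* helpers. The struct change itself is not part of this excerpt; assuming the field is a plain cpumask_t named supported_cpus (which is what the code below fills and tests), it looks roughly like:

struct arm_pmu {
	/* ... existing state and callbacks ... */
	cpumask_t	supported_cpus;	/* CPUs this PMU instance can count on */
	/* ... */
};
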
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return -ENOENT;
+
perf_pmu_disable(event->pmu);
/* If we don't have a free counter then finish early. */
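
armpmu_add() runs on whichever CPU is scheduling the event in, so for a task-following event this may be a CPU of the wrong class; returning an error here simply keeps the event off that CPU. The same test recurs in armpmu_enable()/armpmu_disable() below; a helper of the following shape (illustrative only, not in the patch) would express it once:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Illustrative: "can this PMU instance count on the CPU we are running on?" */
static bool armpmu_runs_on_this_cpu(struct arm_pmu *armpmu)
{
	return cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
}
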
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ /*
+ * Reject CPU-affine events for CPUs that are of a different class to
+ * that which this PMU handles. Process-following events (where
+ * event->cpu == -1) can be migrated between CPUs, and thus we have to
+ * reject them later (in armpmu_add) if they're scheduled on a
+ * different class of CPU.
+ */
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
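
Returning -ENOENT from event_init is the convention that tells perf core this PMU cannot handle the event, so the core can offer it to another registered PMU; that is what makes a per-cluster arm_pmu workable on a big.LITTLE system. From userspace, the two cases distinguished in the comment look like this (illustrative snippet, not part of the patch):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cycles_counter(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid == -1, cpu >= 0: CPU-affine, rejected in event_init if the CPU
	 * is of the wrong class; pid > 0, cpu == -1: task-following, only
	 * filtered later, in armpmu_add(). */
	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
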
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
+
if (enabled)
armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
+
armpmu->stop(armpmu);
}
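
armpmu_enable()/armpmu_disable() back pmu::pmu_enable/pmu_disable, which perf core invokes on the CPU doing the context switch; roughly (simplified sketch of the caller, not actual core code):

#include <linux/perf_event.h>

/* Simplified: when a task with counters lands on a CPU, the core brackets the
 * (re)programming of its events like this, on that CPU. For a task-following
 * event on a heterogeneous system, "that CPU" can be one this arm_pmu does
 * not drive, hence the early returns above. */
static void sched_in_sketch(struct perf_event_context *ctx)
{
	perf_pmu_disable(ctx->pmu);	/* -> armpmu_disable() */
	/* ... armpmu_add()/start for the task's events ... */
	perf_pmu_enable(ctx->pmu);	/* -> armpmu_enable() */
}
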
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
void *hcpu)
{
+ int cpu = (unsigned long)hcpu;
struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return NOTIFY_DONE;
+
if (pmu->reset)
pmu->reset(pmu);
else
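
The notifier only acts on CPU_STARTING, which runs on the CPU that is coming online, so the hcpu argument is the new CPU itself and can be tested directly against the PMU's mask. For context, the notifier is hooked up via the arm_pmu's hotplug_nb elsewhere in this file, roughly like this (paraphrased sketch, helper name made up):

#include <linux/cpu.h>

static int cpu_pmu_register_notifier(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	return register_cpu_notifier(&cpu_pmu->hotplug_nb);
}
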
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu->reset)
- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+ on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+ cpu_pmu, 1);
/* If no interrupts available, set the corresponding capability flag */
if (!platform_get_irq(cpu_pmu->plat_device, 0))
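
on_each_cpu_mask() is the mask-limited variant of on_each_cpu(): the callback still runs with interrupts disabled on each target CPU, but only CPUs in the mask are targeted, so the reset no longer touches a cluster whose PMU this instance does not drive. Minimal usage sketch (names other than on_each_cpu_mask are made up):

#include <linux/cpumask.h>
#include <linux/smp.h>

static void reset_local_pmu(void *info)
{
	/* runs on every CPU in the mask, with interrupts disabled */
}

static void reset_cluster(const struct cpumask *cluster_cpus)
{
	on_each_cpu_mask(cluster_cpus, reset_local_pmu, NULL, 1 /* wait */);
}
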
}
irqs[i] = cpu;
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
}
- if (i == pdev->num_resources)
+ if (i == pdev->num_resources) {
pmu->irq_affinity = irqs;
- else
+ } else {
kfree(irqs);
+ cpumask_setall(&pmu->supported_cpus);
+ }
return 0;
}
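
When every per-IRQ affinity was parsed, supported_cpus ends up holding exactly the CPUs this PMU instance serves; on any failure the code falls back to "all CPUs", preserving the old behaviour on homogeneous systems. Once populated, the mask also lets callers pick the right PMU instance for a given CPU, e.g. (illustrative helper, not in the patch):

#include <linux/cpumask.h>

static struct arm_pmu *find_pmu_for_cpu(struct arm_pmu **pmus, int npmus,
					int cpu)
{
	int i;

	for (i = 0; i < npmus; i++)
		if (cpumask_test_cpu(cpu, &pmus[i]->supported_cpus))
			return pmus[i];

	return NULL;
}
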
ret = init_fn(pmu);
} else {
ret = probe_current_pmu(pmu);
+ cpumask_setall(&pmu->supported_cpus);
}
if (ret) {