        int num_cntrs;
        atomic_t active_events;
        struct mutex reserve_mutex;
-       struct list_head entry;
+       struct hlist_node node;
        cpumask_t cpus;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
-static DEFINE_MUTEX(cci_pmu_mutex);
-static LIST_HEAD(cci_pmu_list);
-
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
        CCI400_R0,
        return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
-static int cci_pmu_offline_cpu(unsigned int cpu)
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
-       struct cci_pmu *cci_pmu;
+       struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
        unsigned int target;
-       mutex_lock(&cci_pmu_mutex);
-       list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
-               if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-                       continue;
-               target = cpumask_any_but(cpu_online_mask, cpu);
-               if (target >= nr_cpu_ids)
-                       continue;
-               /*
-                * TODO: migrate context once core races on event->ctx have
-                * been fixed.
-                */
-               cpumask_set_cpu(target, &cci_pmu->cpus);
-       }
-       mutex_unlock(&cci_pmu_mutex);
+       if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+               return 0;
+       target = cpumask_any_but(cpu_online_mask, cpu);
+       if (target >= nr_cpu_ids)
+               return 0;
+       /*
+        * TODO: migrate context once core races on event->ctx have
+        * been fixed.
+        */
+       cpumask_set_cpu(target, &cci_pmu->cpus);
        return 0;
}
        if (ret)
                return ret;
-       mutex_lock(&cci_pmu_mutex);
-       list_add(&cci_pmu->entry, &cci_pmu_list);
-       mutex_unlock(&cci_pmu_mutex);
-
+       cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+                                        &cci_pmu->node);
        pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
        return 0;
}
{
        int ret;
-       ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-                                       "AP_PERF_ARM_CCI_ONLINE", NULL,
-                                       cci_pmu_offline_cpu);
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+                                     "AP_PERF_ARM_CCI_ONLINE", NULL,
+                                     cci_pmu_offline_cpu);
        if (ret)
                return ret;
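
For reference, below is a minimal sketch of the multi-instance hotplug pattern the patch switches to. The demo_* names and the dynamic CPUHP_AP_ONLINE_DYN state are illustrative assumptions, not part of the CCI driver; the cpuhp_*, cpumask_* and hlist_entry_safe() calls are the kernel APIs the hunks above actually use. Each probed device links its own hlist_node into the state, and the cpuhp core invokes the teardown callback once per registered instance, which is why the driver-private cci_pmu_mutex and cci_pmu_list can be dropped.

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/list.h>

/* Hypothetical per-device PMU instance, standing in for struct cci_pmu. */
struct demo_pmu {
        struct hlist_node node;         /* links this instance into the cpuhp state */
        cpumask_t cpus;                 /* CPU currently handling this PMU's events */
};

static enum cpuhp_state demo_hp_state;

/* Per-instance teardown callback: the cpuhp core calls this once for every
 * registered node when a CPU goes offline, so no driver-side list or
 * locking is required. */
static int demo_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct demo_pmu *pmu = hlist_entry_safe(node, struct demo_pmu, node);
        unsigned int target;

        if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
                return 0;
        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;
        cpumask_set_cpu(target, &pmu->cpus);
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        /* NULL startup callback, teardown only; with CPUHP_AP_ONLINE_DYN the
         * allocated state number is returned and must be kept for later
         * add/remove-instance calls. */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo/pmu:online",
                                      NULL, demo_pmu_offline_cpu);
        if (ret < 0)
                return ret;
        demo_hp_state = ret;
        return 0;
}

A probe path would then call cpuhp_state_add_instance_nocalls(demo_hp_state, &pmu->node) for each device, mirroring the cci_pmu_probe() hunk above, and cpuhp_state_remove_instance_nocalls() on removal.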