EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
-/**
- * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
- * @cpu_dev: CPU device for which we do this operation
- * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
- *
- * This marks OPP table of the @cpu_dev as shared by the CPUs present in
- * @cpumask.
- *
- * Returns -ENODEV if OPP table isn't already present.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct opp_device *opp_dev;
- struct opp_table *opp_table;
- struct device *dev;
- int cpu, ret = 0;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _find_opp_table(cpu_dev);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
- goto unlock;
- }
-
- for_each_cpu(cpu, cpumask) {
- if (cpu == cpu_dev->id)
- continue;
-
- dev = get_cpu_device(cpu);
- if (!dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- continue;
- }
-
- opp_dev = _add_opp_dev(dev, opp_table);
- if (!opp_dev) {
- dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
- __func__, cpu);
- continue;
- }
- }
-unlock:
- mutex_unlock(&opp_table_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
-
#ifdef CONFIG_OF
/**
* dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
#endif
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by a few CPUs
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks the OPP table of @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
+ struct device *dev;
+ int cpu, ret = 0;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto unlock;
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+ }
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
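
For reference, a minimal sketch of how a cpufreq driver might call this helper
from its ->init() path. The driver name foo_cpufreq_init and the exact point at
which the OPP table is populated are assumptions for illustration, not part of
this patch; only dev_pm_opp_set_sharing_cpus() and get_cpu_device() come from
the code above.

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		struct device *cpu_dev = get_cpu_device(policy->cpu);
		int ret;

		if (!cpu_dev)
			return -ENODEV;

		/*
		 * The OPP table for cpu_dev must already exist (e.g. added
		 * via dev_pm_opp_add() or dev_pm_opp_of_add_table()),
		 * otherwise dev_pm_opp_set_sharing_cpus() returns -ENODEV.
		 */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
		if (ret)
			return ret;

		/* ... rest of the driver's init sequence ... */
		return 0;
	}

Passing policy->cpus marks the OPP table of policy->cpu as shared by every CPU
in that policy, so the other CPUs do not need their own copies of the table.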