cpu: Support cpus_up/down for fast hotplug
author Youngtae Lee <yt0729.lee@samsung.com>
Mon, 23 Apr 2018 03:57:31 +0000 (12:57 +0900)
committer lakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:59:00 +0000 (14:59 +0900)
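
Add cpus_down()/cpus_up(), cpumask-based variants of cpu_down()/cpu_up()
that hotplug a whole set of CPUs in one call. cpus_down() runs the
teardown of every CPU in the mask inside a single cpus_write_lock()
section, and the new cpu_fastoff_mask/cpu_faston_mask report which CPUs
are currently in transition.

A minimal usage sketch (the CPU numbers are illustrative only):

	struct cpumask mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	cpumask_set_cpu(3, &mask);

	cpus_down(mask);	/* CPU2 and CPU3 go down as one batch */
	cpus_up(mask);		/* and come back up together */
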
Change-Id: Ib0b3da952426338a4afc7e388c9fd5d6874cdce7
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
include/linux/cpumask.h
kernel/cpu.c

index 8d3125c493b2322478ccca401d932797dec34850..bd5282063859bf1a052063ed539c76d22777e111 100644 (file)
@@ -115,6 +115,10 @@ extern struct cpumask __cpu_active_mask;
 #define cpu_active(cpu)                ((cpu) == 0)
 #endif
 
+/* Set while a batched hotplug via cpus_down()/cpus_up() is in flight */
+extern struct cpumask cpu_fastoff_mask;
+extern struct cpumask cpu_faston_mask;
+
 /* verify cpu argument to cpumask_* operators */
 static inline unsigned int cpumask_check(unsigned int cpu)
 {
index a42b9eaf57824ce7d5f37743eb4ca26ebb54a55a..a4ffb4b945660d8120a10bd681af8629319e6741 100644 (file)
@@ -849,6 +849,118 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
        }
        return ret;
 }
+
+/* CPUs currently being taken down/brought up by cpus_down()/cpus_up() */
+struct cpumask cpu_fastoff_mask;
+EXPORT_SYMBOL(cpu_fastoff_mask);
+struct cpumask cpu_faston_mask;
+EXPORT_SYMBOL(cpu_faston_mask);
+
+/*
+ * Batched variant of _cpu_down(): take every CPU in @cpus down to
+ * @target inside a single cpus_write_lock() section.
+ * Requires cpu_add_remove_lock to be held.
+ */
+static int __ref _cpus_down(struct cpumask cpus, int tasks_frozen,
+                          enum cpuhp_state target)
+{
+       struct cpuhp_cpu_state *st;
+       int prev_state, ret = 0;
+       int cpu;
+
+       if (num_online_cpus() == 1)
+               return -EBUSY;
+
+       for_each_cpu(cpu, &cpus)
+               if (!cpu_present(cpu))
+                       return -EINVAL;
+
+       cpus_write_lock();
+
+       cpuhp_tasks_frozen = tasks_frozen;
+
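+       /* Publish the in-flight batch through cpu_fastoff_mask */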
+       cpumask_copy(&cpu_fastoff_mask, &cpus);
+       for_each_cpu(cpu, &cpus) {
+               st = per_cpu_ptr(&cpuhp_state, cpu);
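+               /*
+                * The caller passes only online CPUs, so each call below is
+                * expected to return the same previous state; the rollback
+                * check in the teardown loop relies on that.
+                */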
+               prev_state = cpuhp_set_state(st, target);
+       }
+
+       for_each_cpu(cpu, &cpus) {
+               st = per_cpu_ptr(&cpuhp_state, cpu);
+               if (st->state <= CPUHP_TEARDOWN_CPU)
+                       continue;
+
+               st->target = max((int)target, CPUHP_TEARDOWN_CPU);
+               ret = cpuhp_kick_ap_work(cpu);
+               /*
+                * The AP side has done the error rollback already. Just
+                * return the error code.
+                */
+               if (ret)
+                       goto out;
+
+               /*
+                * We might have stopped still in the range of the AP hotplug
+                * thread. Nothing to do anymore.
+                */
+               if (st->state > CPUHP_TEARDOWN_CPU)
+                       goto out;
+
+               st->target = target;
+       }
+
+       for_each_cpu(cpu, &cpus) {
+               st = per_cpu_ptr(&cpuhp_state, cpu);
+               ret = cpuhp_down_callbacks(cpu, st, target);
+               if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+                       cpuhp_reset_state(st, prev_state);
+                       __cpuhp_kick_ap(st);
+               }
+       }
+
+out:
+       cpumask_clear(&cpu_fastoff_mask);
+       cpus_write_unlock();
+
+       /*
+        * Do post unplug cleanup. This is still protected against
+        * concurrent CPU hotplug via cpu_add_remove_lock.
+        */
+       lockup_detector_cleanup();
+
+       return ret;
+}
+
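+/*
+ * cpus_down - offline every CPU set in @cpus.  CPUs that are already
+ * offline are dropped from the mask with a warning rather than failing
+ * the whole request.
+ */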
+int cpus_down(struct cpumask cpus)
+{
+       int err, cpu;
+
+       cpu_maps_update_begin();
+
+       if (cpu_hotplug_disabled) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       for_each_cpu(cpu, &cpus)
+               if (!cpu_online(cpu)) {
+                       cpumask_clear_cpu(cpu, &cpus);
+                       pr_warn("cpus_down: cpu%d is not online\n", cpu);
+               }
+
+       err = _cpus_down(cpus, 0, CPUHP_OFFLINE);
+
+out:
+       cpu_maps_update_done();
+       return err;
+}
+EXPORT_SYMBOL_GPL(cpus_down);
 
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -1075,6 +1193,33 @@ int cpu_up(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_up);
 
+/*
+ * cpus_up - online every CPU set in @cpus.  Already-online CPUs are
+ * dropped from the mask with a warning; the result of the last
+ * do_cpu_up() call is returned.
+ */
+int cpus_up(struct cpumask cpus)
+{
+       int cpu, ret = 0;
+
+       for_each_cpu(cpu, &cpus)
+               if (cpu_online(cpu)) {
+                       cpumask_clear_cpu(cpu, &cpus);
+                       pr_warn("cpus_up: cpu%d is already online\n", cpu);
+               }
+
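+       /* Publish the in-flight batch through cpu_faston_mask */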
+       cpumask_copy(&cpu_faston_mask, &cpus);
+
+       for_each_cpu(cpu, &cpus)
+               ret = do_cpu_up((unsigned int)cpu, CPUHP_ONLINE);
+
+       cpumask_clear(&cpu_faston_mask);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpus_up);
+
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 