sched: ems: prevent access to plugged-out CPU.
author: Park Bumgyu <bumgyu.park@samsung.com>
Tue, 12 Jun 2018 01:23:31 +0000 (10:23 +0900)
committer: lakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:59:22 +0000 (14:59 +0900)
Change-Id: Id9e0ac5cb1979cd8d3766f9fb1a7c0874a561e7b
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
kernel/sched/ems/core.c
kernel/sched/ems/energy.c
kernel/sched/ems/ontime.c
kernel/sched/ems/st_addon.c

index fa23b26716f7211b509053ec581f2cd33a8b274e..e13179ed3e3e7294e2de08bbd8c79324be57b412 100644 (file)
@@ -122,7 +122,7 @@ static int select_proper_cpu(struct task_struct *p, int prev_cpu)
        unsigned long best_min_util = ULONG_MAX;
        int best_cpu = -1;
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, cpu_active_mask) {
                int i;
 
                /* visit each coregroup only once */
index 4b403ff63e11cd19e2420d5aca039f1350695e92..de31fe908d59ea4392636cd99c6a9c9bf066070e 100644 (file)
@@ -157,7 +157,7 @@ static unsigned int calculate_energy(struct task_struct *p, int target_cpu)
                        util[cpu] += task_util_est(p);
        }
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, cpu_active_mask) {
                struct energy_table *table;
                unsigned long max_util = 0, util_sum = 0;
                unsigned long capacity;
index a5736220e240c2683a6c760c361830b975cf3dce..e102284be97293105446a3ea5f22f75fed86f164 100644 (file)
@@ -408,7 +408,7 @@ void ontime_migration(void)
        if (!spin_trylock(&om_lock))
                return;
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, cpu_active_mask) {
                unsigned long flags;
                struct rq *rq = cpu_rq(cpu);
                struct sched_entity *se;
index 0909af2a5b5a76f26b76dc13c6e47c9702b97d29..7a38172d7ebc4e1a55e1f6bf1ca600d831b641f0 100644 (file)
@@ -94,7 +94,7 @@ static int select_idle_cpu(struct task_struct *p)
        int i;
        char state[30] = "prev_cpu";
 
-       for_each_possible_cpu(cpu) {
+       for_each_cpu(cpu, cpu_active_mask) {
                if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
                        continue;
 
@@ -102,9 +102,6 @@ static int select_idle_cpu(struct task_struct *p)
                        unsigned long capacity_orig = capacity_orig_of(i);
                        unsigned long new_util, wake_util;
 
-                       if (!cpu_active(i))
-                               continue;
-
                        wake_util = cpu_util_wake(i, p);
                        new_util = wake_util + task_util_est(p);