ANDROID: sched/fair: fix CPU selection for non latency sensitive tasks
authorPavankumar Kondeti <pkondeti@codeaurora.org>
Thu, 8 Feb 2018 11:13:45 +0000 (16:43 +0530)
committerIonela Voinescu <ionela.voinescu@arm.com>
Wed, 18 Jul 2018 09:47:25 +0000 (10:47 +0100)
For non latency sensitive tasks, CPU selection targets an active
CPU in the little cluster. The shallowest c-state CPU is stored as
a backup. However, if all CPUs in the little cluster are idle, we pick
an active CPU in the BIG cluster as the target CPU. This incorrect
choice of the target CPU may not get corrected by
select_energy_cpu_idx(), depending on the energy difference between
the previous CPU and the target CPU.

This can be fixed easily by using the same variable to track the
capacity of the traversed CPUs for both idle and active CPUs.

Change-Id: I3efb8bc82ff005383163921ef2bd39fcac4589ad
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
kernel/sched/fair.c

index 61b7bcce11c8135d586740b1e946b0008986a1ee..58fe4fc1310556efe6b4dc8ab424f8d75858f0a1 100644 (file)
@@ -6908,7 +6908,6 @@ static int start_cpu(bool boosted)
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
                                   bool boosted, bool prefer_idle)
 {
-       unsigned long best_idle_min_cap_orig = ULONG_MAX;
        unsigned long min_util = boosted_task_util(p);
        unsigned long target_capacity = ULONG_MAX;
        unsigned long min_wake_util = ULONG_MAX;
@@ -7052,6 +7051,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
                            (capacity_orig * SCHED_CAPACITY_SCALE))
                                continue;
 
+                       /*
+                        * Favor CPUs with smaller capacity for non latency
+                        * sensitive tasks.
+                        */
+                       if (capacity_orig > target_capacity)
+                               continue;
+
                        /*
                         * Case B) Non latency sensitive tasks on IDLE CPUs.
                         *
@@ -7079,10 +7085,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
                        if (idle_cpu(i)) {
                                int idle_idx = idle_get_state_idx(cpu_rq(i));
 
-                               /* Select idle CPU with lower cap_orig */
-                               if (capacity_orig > best_idle_min_cap_orig)
-                                       continue;
-
                                /*
                                 * Skip CPUs in deeper idle state, but only
                                 * if they are also less energy efficient.
@@ -7093,8 +7095,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
                                    best_idle_cstate <= idle_idx)
                                        continue;
 
-                               /* Keep track of best idle CPU */
-                               best_idle_min_cap_orig = capacity_orig;
+                               target_capacity = capacity_orig;
                                best_idle_cstate = idle_idx;
                                best_idle_cpu = i;
                                continue;
@@ -7120,10 +7121,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
                         * capacity.
                         */
 
-                       /* Favor CPUs with smaller capacity */
-                       if (capacity_orig > target_capacity)
-                               continue;
-
                        /* Favor CPUs with maximum spare capacity */
                        if ((capacity_orig - new_util) < target_max_spare_cap)
                                continue;